Revert "Replace y_buffer_8bit with a downsampling pyramid"
This reverts commit cfb46c78e7badb3ecc25479368f6ea985fdfa512.
Reason for revert: Assertion failure in
AV1/ResizeInternalTestLarge.TestInternalResizeWorks/0
Bug: aomedia:3362
Change-Id: I92d060a6e327429966935d9c45f7994572e08f7e
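For context: this restores the previous approach, in which each frame carries a
lazily filled 8-bit luma copy (y_buffer_8bit) instead of a downsampling
pyramid. Callers that need 8-bit data test for a high-bitdepth frame and
downconvert on demand. A minimal sketch of the restored call pattern, as it
reappears in corner_match.c and disflow.c below:

    unsigned char *ref_buffer = ref->y_buffer;
    if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
      // 16-bit storage: build (or reuse) the cached 8-bit copy
      ref_buffer = av1_downconvert_frame(ref, bit_depth);
    }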
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index 6671dfa..c5c2db7 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -176,7 +176,7 @@
# Flow estimation library
if(NOT CONFIG_REALTIME_ONLY)
- list(APPEND AOM_DSP_ENCODER_SOURCES "${AOM_ROOT}/aom_dsp/pyramid.c"
+ list(APPEND AOM_DSP_ENCODER_SOURCES
"${AOM_ROOT}/aom_dsp/flow_estimation/corner_detect.c"
"${AOM_ROOT}/aom_dsp/flow_estimation/corner_match.c"
"${AOM_ROOT}/aom_dsp/flow_estimation/disflow.c"
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 41d9874..e2c9764 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -2039,7 +2039,7 @@
# Flow estimation library
if (aom_config("CONFIG_REALTIME_ONLY") ne "yes") {
- add_proto qw/double av1_compute_cross_correlation/, "const unsigned char *im1, int stride1, int x1, int y1, const unsigned char *im2, int stride2, int x2, int y2";
+ add_proto qw/double av1_compute_cross_correlation/, "unsigned char *im1, int stride1, int x1, int y1, unsigned char *im2, int stride2, int x2, int y2";
specialize qw/av1_compute_cross_correlation sse4_1 avx2/;
}
diff --git a/aom_dsp/flow_estimation/corner_detect.c b/aom_dsp/flow_estimation/corner_detect.c
index d97ab58..c49e3fa 100644
--- a/aom_dsp/flow_estimation/corner_detect.c
+++ b/aom_dsp/flow_estimation/corner_detect.c
@@ -21,7 +21,7 @@
// Fast_9 wrapper
#define FAST_BARRIER 18
-int av1_fast_corner_detect(const unsigned char *buf, int width, int height,
+int av1_fast_corner_detect(unsigned char *buf, int width, int height,
int stride, int *points, int max_points) {
int num_points;
xy *const frm_corners_xy = aom_fast9_detect_nonmax(buf, width, height, stride,
diff --git a/aom_dsp/flow_estimation/corner_detect.h b/aom_dsp/flow_estimation/corner_detect.h
index 1c6ee72..4481c4e 100644
--- a/aom_dsp/flow_estimation/corner_detect.h
+++ b/aom_dsp/flow_estimation/corner_detect.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-int av1_fast_corner_detect(const unsigned char *buf, int width, int height,
+int av1_fast_corner_detect(unsigned char *buf, int width, int height,
int stride, int *points, int max_points);
#ifdef __cplusplus
diff --git a/aom_dsp/flow_estimation/corner_match.c b/aom_dsp/flow_estimation/corner_match.c
index 18264c9..f675604 100644
--- a/aom_dsp/flow_estimation/corner_match.c
+++ b/aom_dsp/flow_estimation/corner_match.c
@@ -19,7 +19,6 @@
#include "aom_dsp/flow_estimation/corner_match.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_dsp/flow_estimation/ransac.h"
-#include "aom_dsp/pyramid.h"
#include "aom_scale/yv12config.h"
#define SEARCH_SZ 9
@@ -30,8 +29,7 @@
/* Compute var(im) * MATCH_SZ_SQ over a MATCH_SZ by MATCH_SZ window of im,
centered at (x, y).
*/
-static double compute_variance(const unsigned char *im, int stride, int x,
- int y) {
+static double compute_variance(unsigned char *im, int stride, int x, int y) {
int sum = 0;
int sumsq = 0;
int var;
@@ -50,9 +48,9 @@
correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
of each image, centered at (x1, y1) and (x2, y2) respectively.
*/
-double av1_compute_cross_correlation_c(const unsigned char *im1, int stride1,
- int x1, int y1, const unsigned char *im2,
- int stride2, int x2, int y2) {
+double av1_compute_cross_correlation_c(unsigned char *im1, int stride1, int x1,
+ int y1, unsigned char *im2, int stride2,
+ int x2, int y2) {
int v1, v2;
int sum1 = 0;
int sum2 = 0;
@@ -86,9 +84,9 @@
(point1y - point2y) * (point1y - point2y)) <= thresh * thresh;
}
-static void improve_correspondence(const unsigned char *frm,
- const unsigned char *ref, int width,
- int height, int frm_stride, int ref_stride,
+static void improve_correspondence(unsigned char *frm, unsigned char *ref,
+ int width, int height, int frm_stride,
+ int ref_stride,
Correspondence *correspondences,
int num_correspondences) {
int i;
@@ -145,8 +143,8 @@
}
}
-int aom_determine_correspondence(const unsigned char *src, int *src_corners,
- int num_src_corners, const unsigned char *ref,
+int aom_determine_correspondence(unsigned char *src, int *src_corners,
+ int num_src_corners, unsigned char *ref,
int *ref_corners, int num_ref_corners,
int width, int height, int src_stride,
int ref_stride, int *correspondence_pts) {
@@ -213,31 +211,25 @@
}
int av1_compute_global_motion_feature_based(
- TransformationType type, const ImagePyramid *src_pyramid, int *src_corners,
- int num_src_corners, const ImagePyramid *ref_pyramid,
- int *num_inliers_by_motion, MotionModel *params_by_motion,
- int num_motions) {
+ TransformationType type, unsigned char *src_buffer, int src_width,
+ int src_height, int src_stride, int *src_corners, int num_src_corners,
+ YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
+ MotionModel *params_by_motion, int num_motions) {
int i;
int num_ref_corners;
int num_correspondences;
int *correspondences;
int ref_corners[2 * MAX_CORNERS];
+ unsigned char *ref_buffer = ref->y_buffer;
RansacFunc ransac = av1_get_ransac_type(type);
- assert(src_pyramid->valid);
- const uint8_t *src_buffer = src_pyramid->layers[0].buffer;
- const int src_width = src_pyramid->layers[0].width;
- const int src_height = src_pyramid->layers[0].height;
- const int src_stride = src_pyramid->layers[0].stride;
+ if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
+ ref_buffer = av1_downconvert_frame(ref, bit_depth);
+ }
- assert(ref_pyramid->valid);
- const uint8_t *ref_buffer = ref_pyramid->layers[0].buffer;
- const int ref_width = ref_pyramid->layers[0].width;
- const int ref_height = ref_pyramid->layers[0].height;
- const int ref_stride = ref_pyramid->layers[0].stride;
-
- num_ref_corners = av1_fast_corner_detect(
- ref_buffer, ref_width, ref_height, ref_stride, ref_corners, MAX_CORNERS);
+ num_ref_corners =
+ av1_fast_corner_detect(ref_buffer, ref->y_width, ref->y_height,
+ ref->y_stride, ref_corners, MAX_CORNERS);
// find correspondences between the two images
correspondences =
@@ -246,7 +238,7 @@
num_correspondences = aom_determine_correspondence(
src_buffer, (int *)src_corners, num_src_corners, ref_buffer,
(int *)ref_corners, num_ref_corners, src_width, src_height, src_stride,
- ref_stride, correspondences);
+ ref->y_stride, correspondences);
ransac(correspondences, num_correspondences, num_inliers_by_motion,
params_by_motion, num_motions);
diff --git a/aom_dsp/flow_estimation/corner_match.h b/aom_dsp/flow_estimation/corner_match.h
index e1e8a87..71afadf 100644
--- a/aom_dsp/flow_estimation/corner_match.h
+++ b/aom_dsp/flow_estimation/corner_match.h
@@ -32,16 +32,17 @@
int rx, ry;
} Correspondence;
-int aom_determine_correspondence(const unsigned char *src, int *src_corners,
- int num_src_corners, const unsigned char *ref,
+int aom_determine_correspondence(unsigned char *src, int *src_corners,
+ int num_src_corners, unsigned char *ref,
int *ref_corners, int num_ref_corners,
int width, int height, int src_stride,
int ref_stride, int *correspondence_pts);
int av1_compute_global_motion_feature_based(
- TransformationType type, const ImagePyramid *src_pyramid, int *src_corners,
- int num_src_corners, const ImagePyramid *ref_pyramid,
- int *num_inliers_by_motion, MotionModel *params_by_motion, int num_motions);
+ TransformationType type, unsigned char *src_buffer, int src_width,
+ int src_height, int src_stride, int *src_corners, int num_src_corners,
+ YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
+ MotionModel *params_by_motion, int num_motions);
#ifdef __cplusplus
}
diff --git a/aom_dsp/flow_estimation/disflow.c b/aom_dsp/flow_estimation/disflow.c
index ac84090..2a6ad4b 100644
--- a/aom_dsp/flow_estimation/disflow.c
+++ b/aom_dsp/flow_estimation/disflow.c
@@ -16,7 +16,6 @@
#include "aom_dsp/flow_estimation/disflow.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
#include "aom_dsp/flow_estimation/ransac.h"
-#include "aom_dsp/pyramid.h"
#include "aom_scale/yv12config.h"
@@ -49,7 +48,7 @@
unsigned char *level_buffer;
double *level_dx_buffer;
double *level_dy_buffer;
-} FlowPyramid;
+} ImagePyramid;
// Don't use points around the frame border since they are less reliable
static INLINE int valid_point(int x, int y, int width, int height) {
@@ -328,7 +327,7 @@
}
}
-static void free_pyramid(FlowPyramid *pyr) {
+static void free_pyramid(ImagePyramid *pyr) {
aom_free(pyr->level_buffer);
if (pyr->has_gradient) {
aom_free(pyr->level_dx_buffer);
@@ -337,9 +336,9 @@
aom_free(pyr);
}
-static FlowPyramid *alloc_pyramid(int width, int height, int pad_size,
- int compute_gradient) {
- FlowPyramid *pyr = aom_calloc(1, sizeof(*pyr));
+static ImagePyramid *alloc_pyramid(int width, int height, int pad_size,
+ int compute_gradient) {
+ ImagePyramid *pyr = aom_calloc(1, sizeof(*pyr));
if (!pyr) return NULL;
pyr->has_gradient = compute_gradient;
// 2 * width * height is the upper bound for a buffer that fits
@@ -367,7 +366,7 @@
return pyr;
}
-static INLINE void update_level_dims(FlowPyramid *frm_pyr, int level) {
+static INLINE void update_level_dims(ImagePyramid *frm_pyr, int level) {
frm_pyr->widths[level] = frm_pyr->widths[level - 1] >> 1;
frm_pyr->heights[level] = frm_pyr->heights[level - 1] >> 1;
frm_pyr->strides[level] = frm_pyr->widths[level] + 2 * frm_pyr->pad_size;
@@ -380,10 +379,10 @@
}
// Compute coarse to fine pyramids for a frame
-static void compute_flow_pyramids(const unsigned char *frm, const int frm_width,
+static void compute_flow_pyramids(unsigned char *frm, const int frm_width,
const int frm_height, const int frm_stride,
int n_levels, int pad_size, int compute_grad,
- FlowPyramid *frm_pyr) {
+ ImagePyramid *frm_pyr) {
int cur_width, cur_height, cur_stride, cur_loc;
assert((frm_width >> n_levels) > 0);
assert((frm_height >> n_levels) > 0);
@@ -481,7 +480,7 @@
}
// make sure flow_u and flow_v start at 0
-static bool compute_flow_field(FlowPyramid *frm_pyr, FlowPyramid *ref_pyr,
+static bool compute_flow_field(ImagePyramid *frm_pyr, ImagePyramid *ref_pyr,
double *flow_u, double *flow_v) {
int cur_width, cur_height, cur_stride, cur_loc, patch_loc, patch_center;
double *u_upscale =
@@ -540,27 +539,17 @@
}
int av1_compute_global_motion_disflow_based(
- TransformationType type, const ImagePyramid *frm_pyramid, int *frm_corners,
- int num_frm_corners, const ImagePyramid *ref_pyramid,
- int *num_inliers_by_motion, MotionModel *params_by_motion,
- int num_motions) {
+ TransformationType type, unsigned char *frm_buffer, int frm_width,
+ int frm_height, int frm_stride, int *frm_corners, int num_frm_corners,
+ YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
+ MotionModel *params_by_motion, int num_motions) {
+ unsigned char *ref_buffer = ref->y_buffer;
+ const int ref_width = ref->y_width;
+ const int ref_height = ref->y_height;
const int pad_size = AOMMAX(PATCH_SIZE, MIN_PAD);
int num_correspondences;
double *correspondences;
RansacFuncDouble ransac = av1_get_ransac_double_prec_type(type);
-
- assert(frm_pyramid->valid);
- const uint8_t *frm_buffer = frm_pyramid->layers[0].buffer;
- const int frm_width = frm_pyramid->layers[0].width;
- const int frm_height = frm_pyramid->layers[0].height;
- const int frm_stride = frm_pyramid->layers[0].stride;
-
- assert(ref_pyramid->valid);
- const uint8_t *ref_buffer = ref_pyramid->layers[0].buffer;
- const int ref_width = ref_pyramid->layers[0].width;
- const int ref_height = ref_pyramid->layers[0].height;
- const int ref_stride = ref_pyramid->layers[0].stride;
-
assert(frm_width == ref_width);
assert(frm_height == ref_height);
@@ -569,6 +558,10 @@
frm_width < frm_height ? get_msb(frm_width) : get_msb(frm_height);
const int n_levels = AOMMIN(msb, N_LEVELS);
+ if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
+ ref_buffer = av1_downconvert_frame(ref, bit_depth);
+ }
+
// TODO(sarahparker) We will want to do the source pyramid computation
// outside of this function so it doesn't get recomputed for every
// reference. We also don't need to compute every pyramid level for the
@@ -577,21 +570,21 @@
// once the full implementation is working.
// Allocate frm image pyramids
int compute_gradient = 1;
- FlowPyramid *frm_pyr =
+ ImagePyramid *frm_pyr =
alloc_pyramid(frm_width, frm_height, pad_size, compute_gradient);
if (!frm_pyr) return 0;
compute_flow_pyramids(frm_buffer, frm_width, frm_height, frm_stride, n_levels,
pad_size, compute_gradient, frm_pyr);
// Allocate ref image pyramids
compute_gradient = 0;
- FlowPyramid *ref_pyr =
+ ImagePyramid *ref_pyr =
alloc_pyramid(ref_width, ref_height, pad_size, compute_gradient);
if (!ref_pyr) {
free_pyramid(frm_pyr);
return 0;
}
- compute_flow_pyramids(ref_buffer, ref_width, ref_height, ref_stride, n_levels,
- pad_size, compute_gradient, ref_pyr);
+ compute_flow_pyramids(ref_buffer, ref_width, ref_height, ref->y_stride,
+ n_levels, pad_size, compute_gradient, ref_pyr);
int ret = 0;
double *flow_u =
diff --git a/aom_dsp/flow_estimation/disflow.h b/aom_dsp/flow_estimation/disflow.h
index 0373736..52fb261 100644
--- a/aom_dsp/flow_estimation/disflow.h
+++ b/aom_dsp/flow_estimation/disflow.h
@@ -20,9 +20,10 @@
#endif
int av1_compute_global_motion_disflow_based(
- TransformationType type, const ImagePyramid *frm_pyramid, int *frm_corners,
- int num_frm_corners, const ImagePyramid *ref_pyramid,
- int *num_inliers_by_motion, MotionModel *params_by_motion, int num_motions);
+ TransformationType type, unsigned char *frm_buffer, int frm_width,
+ int frm_height, int frm_stride, int *frm_corners, int num_frm_corners,
+ YV12_BUFFER_CONFIG *ref, int bit_depth, int *num_inliers_by_motion,
+ MotionModel *params_by_motion, int num_motions);
#ifdef __cplusplus
}
diff --git a/aom_dsp/flow_estimation/flow_estimation.c b/aom_dsp/flow_estimation/flow_estimation.c
index 1169773..d8cf8bd 100644
--- a/aom_dsp/flow_estimation/flow_estimation.c
+++ b/aom_dsp/flow_estimation/flow_estimation.c
@@ -18,22 +18,42 @@
#include "aom_scale/yv12config.h"
int aom_compute_global_motion(TransformationType type,
- const ImagePyramid *src_pyramid, int *src_corners,
- int num_src_corners,
- const ImagePyramid *ref_pyramid,
+ unsigned char *src_buffer, int src_width,
+ int src_height, int src_stride, int *src_corners,
+ int num_src_corners, YV12_BUFFER_CONFIG *ref,
+ int bit_depth,
GlobalMotionEstimationType gm_estimation_type,
int *num_inliers_by_motion,
MotionModel *params_by_motion, int num_motions) {
switch (gm_estimation_type) {
case GLOBAL_MOTION_FEATURE_BASED:
return av1_compute_global_motion_feature_based(
- type, src_pyramid, src_corners, num_src_corners, ref_pyramid,
- num_inliers_by_motion, params_by_motion, num_motions);
+ type, src_buffer, src_width, src_height, src_stride, src_corners,
+ num_src_corners, ref, bit_depth, num_inliers_by_motion,
+ params_by_motion, num_motions);
case GLOBAL_MOTION_DISFLOW_BASED:
return av1_compute_global_motion_disflow_based(
- type, src_pyramid, src_corners, num_src_corners, ref_pyramid,
- num_inliers_by_motion, params_by_motion, num_motions);
+ type, src_buffer, src_width, src_height, src_stride, src_corners,
+ num_src_corners, ref, bit_depth, num_inliers_by_motion,
+ params_by_motion, num_motions);
default: assert(0 && "Unknown global motion estimation type");
}
return 0;
}
+
+unsigned char *av1_downconvert_frame(YV12_BUFFER_CONFIG *frm, int bit_depth) {
+ int i, j;
+ uint16_t *orig_buf = CONVERT_TO_SHORTPTR(frm->y_buffer);
+ uint8_t *buf_8bit = frm->y_buffer_8bit;
+ assert(buf_8bit);
+ if (!frm->buf_8bit_valid) {
+ for (i = 0; i < frm->y_height; ++i) {
+ for (j = 0; j < frm->y_width; ++j) {
+ buf_8bit[i * frm->y_stride + j] =
+ orig_buf[i * frm->y_stride + j] >> (bit_depth - 8);
+ }
+ }
+ frm->buf_8bit_valid = 1;
+ }
+ return buf_8bit;
+}
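The conversion above narrows each sample by (bit_depth - 8) bits (for a 10-bit
frame, a sample of 1023 maps to 255), and the result is cached via
buf_8bit_valid until the frame buffer is reused. A short sketch of that
contract, assuming a high-bitdepth frame `frm` whose y_buffer_8bit has already
been allocated by aom_realloc_frame_buffer():

    frm->buf_8bit_valid = 0;  // cleared whenever the pixel data changes
    unsigned char *luma8 = av1_downconvert_frame(frm, 10);  // convert + cache
    unsigned char *again = av1_downconvert_frame(frm, 10);  // cached copy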
diff --git a/aom_dsp/flow_estimation/flow_estimation.h b/aom_dsp/flow_estimation/flow_estimation.h
index 45d8309..ab9d328 100644
--- a/aom_dsp/flow_estimation/flow_estimation.h
+++ b/aom_dsp/flow_estimation/flow_estimation.h
@@ -12,7 +12,6 @@
#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_H_
#define AOM_AOM_DSP_FLOW_ESTIMATION_H_
-#include "aom_dsp/pyramid.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
@@ -49,13 +48,16 @@
} MotionModel;
int aom_compute_global_motion(TransformationType type,
- const ImagePyramid *src_pyramid, int *src_corners,
- int num_src_corners,
- const ImagePyramid *ref_pyramid,
+ unsigned char *src_buffer, int src_width,
+ int src_height, int src_stride, int *src_corners,
+ int num_src_corners, YV12_BUFFER_CONFIG *ref,
+ int bit_depth,
GlobalMotionEstimationType gm_estimation_type,
int *num_inliers_by_motion,
MotionModel *params_by_motion, int num_motions);
+unsigned char *av1_downconvert_frame(YV12_BUFFER_CONFIG *frm, int bit_depth);
+
#ifdef __cplusplus
}
#endif
diff --git a/aom_dsp/flow_estimation/x86/corner_match_avx2.c b/aom_dsp/flow_estimation/x86/corner_match_avx2.c
index 5276cfa..9830ad8 100644
--- a/aom_dsp/flow_estimation/x86/corner_match_avx2.c
+++ b/aom_dsp/flow_estimation/x86/corner_match_avx2.c
@@ -28,10 +28,9 @@
correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
of each image, centered at (x1, y1) and (x2, y2) respectively.
*/
-double av1_compute_cross_correlation_avx2(const unsigned char *im1, int stride1,
- int x1, int y1,
- const unsigned char *im2, int stride2,
- int x2, int y2) {
+double av1_compute_cross_correlation_avx2(unsigned char *im1, int stride1,
+ int x1, int y1, unsigned char *im2,
+ int stride2, int x2, int y2) {
int i, stride1_i = 0, stride2_i = 0;
__m256i temp1, sum_vec, sumsq2_vec, cross_vec, v, v1_1, v2_1;
const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
diff --git a/aom_dsp/flow_estimation/x86/corner_match_sse4.c b/aom_dsp/flow_estimation/x86/corner_match_sse4.c
index ac860c2..40eec6c 100644
--- a/aom_dsp/flow_estimation/x86/corner_match_sse4.c
+++ b/aom_dsp/flow_estimation/x86/corner_match_sse4.c
@@ -32,9 +32,8 @@
correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
of each image, centered at (x1, y1) and (x2, y2) respectively.
*/
-double av1_compute_cross_correlation_sse4_1(const unsigned char *im1,
- int stride1, int x1, int y1,
- const unsigned char *im2,
+double av1_compute_cross_correlation_sse4_1(unsigned char *im1, int stride1,
+ int x1, int y1, unsigned char *im2,
int stride2, int x2, int y2) {
int i;
// 2 16-bit partial sums in lanes 0, 4 (== 2 32-bit partial sums in lanes 0,
diff --git a/aom_dsp/pyramid.c b/aom_dsp/pyramid.c
deleted file mode 100644
index c5ecc6f..0000000
--- a/aom_dsp/pyramid.c
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 2022, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "aom_dsp/pyramid.h"
-#include "aom_mem/aom_mem.h"
-#include "aom_ports/bitops.h"
-#include "aom_util/aom_thread.h"
-
-// TODO(rachelbarker): Move needed code from av1/ to aom_dsp/
-#include "av1/common/resize.h"
-
-#include <assert.h>
-#include <string.h>
-
-// Lifecycle:
-// * Frame buffer alloc code calls aom_get_pyramid_alloc_size()
-// to work out how much space is needed for a given number of pyramid
-// levels. This is counted in the size checked against the max allocation
-// limit
-// * Then calls aom_alloc_pyramid() to actually create the pyramid
-// * Pyramid is initially marked as invalid (no data)
-// * Whenever pyramid is needed, we check the valid flag. If set, use existing
-// data. If not set, compute full pyramid
-// * Whenever frame buffer is reused, clear the valid flag
-// * Whenever frame buffer is resized, reallocate pyramid
-
-size_t aom_get_pyramid_alloc_size(int width, int height, int n_levels,
- bool image_is_16bit) {
- // Limit number of levels on small frames
- const int msb = get_msb(AOMMIN(width, height));
- const int max_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
- n_levels = AOMMIN(n_levels, max_levels);
-
- size_t alloc_size = 0;
- alloc_size += sizeof(ImagePyramid);
- alloc_size += n_levels * sizeof(PyramidLayer);
-
- // Calculate how much memory is needed for downscaled frame buffers
- size_t buffer_size = 0;
-
- // Work out if we need to allocate a few extra bytes for alignment.
- // aom_memalign() will ensure that the start of the allocation is aligned
- // to a multiple of PYRAMID_ALIGNMENT. But we want the first image pixel
- // to be aligned, not the first byte of the allocation.
- //
- // In the loop below, we ensure that the stride of every image is a multiple
- // of PYRAMID_ALIGNMENT. Thus the allocated size of each pyramid level will
- // also be a multiple of PYRAMID_ALIGNMENT. Thus, as long as we can get the
- // first pixel in the first pyramid layer aligned properly, that will
- // automatically mean that the first pixel of every row of every layer is
- // properly aligned too.
- //
- // Thus all we need to consider is the first pixel in the first layer.
- // This is located at offset
- // extra_bytes + level_stride * PYRAMID_PADDING + PYRAMID_PADDING
- // bytes into the buffer. Since level_stride is a multiple of
- // PYRAMID_ALIGNMENT, we can ignore that. So we need
- // extra_bytes + PYRAMID_PADDING = multiple of PYRAMID_ALIGNMENT
- //
- // To solve this, we can round PYRAMID_PADDING up to the next multiple
-  // of PYRAMID_ALIGNMENT, then subtract the original value to calculate
- // how many extra bytes are needed.
- size_t first_px_offset =
- (PYRAMID_PADDING + PYRAMID_ALIGNMENT - 1) & ~(PYRAMID_ALIGNMENT - 1);
- size_t extra_bytes = first_px_offset - PYRAMID_PADDING;
- buffer_size += extra_bytes;
-
- // If the original image is stored in an 8-bit buffer, then we can point the
- // lowest pyramid level at that buffer rather than allocating a new one.
- int first_allocated_level = image_is_16bit ? 0 : 1;
-
- for (int level = first_allocated_level; level < n_levels; level++) {
- int level_width = width >> level;
- int level_height = height >> level;
-
- // Allocate padding for each layer
- int padded_width = level_width + 2 * PYRAMID_PADDING;
- int padded_height = level_height + 2 * PYRAMID_PADDING;
-
- // Align the layer stride to be a multiple of PYRAMID_ALIGNMENT
- // This ensures that, as long as the top-left pixel in this pyramid level is
-    // properly aligned, then so will be the leftmost pixel in every row of the
- // pyramid level.
- int level_stride =
- (padded_width + PYRAMID_ALIGNMENT - 1) & ~(PYRAMID_ALIGNMENT - 1);
-
- buffer_size += level_stride * padded_height;
- }
-
- alloc_size += buffer_size;
-
- return alloc_size;
-}
-
-ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
- bool image_is_16bit) {
- // Limit number of levels on small frames
- const int msb = get_msb(AOMMIN(width, height));
- const int max_levels = AOMMAX(msb - MIN_PYRAMID_SIZE_LOG2, 1);
- n_levels = AOMMIN(n_levels, max_levels);
-
- ImagePyramid *pyr = aom_calloc(1, sizeof(*pyr));
- if (!pyr) {
- return NULL;
- }
-
- pyr->layers = aom_calloc(n_levels, sizeof(PyramidLayer));
- if (!pyr->layers) {
- aom_free(pyr);
- return NULL;
- }
-
- pyr->valid = false;
- pyr->n_levels = n_levels;
-
- // Compute sizes and offsets for each pyramid level
- // These are gathered up first, so that we can allocate all pyramid levels
- // in a single buffer
- size_t buffer_size = 0;
- size_t *layer_offsets = aom_calloc(n_levels, sizeof(size_t));
- if (!layer_offsets) {
-    aom_free(pyr->layers);
-    aom_free(pyr);
- return NULL;
- }
-
- // Work out if we need to allocate a few extra bytes for alignment.
- // aom_memalign() will ensure that the start of the allocation is aligned
- // to a multiple of PYRAMID_ALIGNMENT. But we want the first image pixel
- // to be aligned, not the first byte of the allocation.
- //
- // In the loop below, we ensure that the stride of every image is a multiple
- // of PYRAMID_ALIGNMENT. Thus the allocated size of each pyramid level will
- // also be a multiple of PYRAMID_ALIGNMENT. Thus, as long as we can get the
- // first pixel in the first pyramid layer aligned properly, that will
- // automatically mean that the first pixel of every row of every layer is
- // properly aligned too.
- //
- // Thus all we need to consider is the first pixel in the first layer.
- // This is located at offset
- // extra_bytes + level_stride * PYRAMID_PADDING + PYRAMID_PADDING
- // bytes into the buffer. Since level_stride is a multiple of
- // PYRAMID_ALIGNMENT, we can ignore that. So we need
- // extra_bytes + PYRAMID_PADDING = multiple of PYRAMID_ALIGNMENT
- //
- // To solve this, we can round PYRAMID_PADDING up to the next multiple
-  // of PYRAMID_ALIGNMENT, then subtract the original value to calculate
- // how many extra bytes are needed.
- size_t first_px_offset =
- (PYRAMID_PADDING + PYRAMID_ALIGNMENT - 1) & ~(PYRAMID_ALIGNMENT - 1);
- size_t extra_bytes = first_px_offset - PYRAMID_PADDING;
- buffer_size += extra_bytes;
-
- // If the original image is stored in an 8-bit buffer, then we can point the
- // lowest pyramid level at that buffer rather than allocating a new one.
- int first_allocated_level = image_is_16bit ? 0 : 1;
-
- for (int level = first_allocated_level; level < n_levels; level++) {
- PyramidLayer *layer = &pyr->layers[level];
-
- int level_width = width >> level;
- int level_height = height >> level;
-
- // Allocate padding for each layer
- int padded_width = level_width + 2 * PYRAMID_PADDING;
- int padded_height = level_height + 2 * PYRAMID_PADDING;
-
- // Align the layer stride to be a multiple of PYRAMID_ALIGNMENT
- // This ensures that, as long as the top-left pixel in this pyramid level is
-    // properly aligned, then so will be the leftmost pixel in every row of the
- // pyramid level.
- int level_stride =
- (padded_width + PYRAMID_ALIGNMENT - 1) & ~(PYRAMID_ALIGNMENT - 1);
-
- size_t level_alloc_start = buffer_size;
- size_t level_start =
- level_alloc_start + PYRAMID_PADDING * level_stride + PYRAMID_PADDING;
-
- buffer_size += level_stride * padded_height;
-
- layer_offsets[level] = level_start;
- layer->width = level_width;
- layer->height = level_height;
- layer->stride = level_stride;
- }
-
- pyr->buffer_alloc =
- aom_memalign(PYRAMID_ALIGNMENT, buffer_size * sizeof(*pyr->buffer_alloc));
- if (!pyr->buffer_alloc) {
-    aom_free(pyr->layers);
-    aom_free(layer_offsets);
-    aom_free(pyr);
- return NULL;
- }
-
- // Fill in pointers for each level
- // If image is 8-bit, then the lowest level is left unconfigured for now,
- // and will be set up properly when the pyramid is filled in
- for (int level = first_allocated_level; level < n_levels; level++) {
- PyramidLayer *layer = &pyr->layers[level];
- layer->buffer = pyr->buffer_alloc + layer_offsets[level];
- }
-
-#if CONFIG_MULTITHREAD
- pthread_mutex_init(&pyr->mutex, NULL);
-#endif // CONFIG_MULTITHREAD
-
- aom_free(layer_offsets);
- return pyr;
-}
-
-// Fill the border region of a pyramid frame.
-// This must be called after the main image area is filled out.
-// `img_buf` should point to the first pixel in the image area,
-// ie. it should be pyr->level_buffer + pyr->level_loc[level].
-static INLINE void fill_border(uint8_t *img_buf, const int width,
- const int height, const int stride) {
- // Fill left and right areas
- for (int row = 0; row < height; row++) {
- uint8_t *row_start = &img_buf[row * stride];
- uint8_t left_pixel = row_start[0];
- memset(row_start - PYRAMID_PADDING, left_pixel, PYRAMID_PADDING);
- uint8_t right_pixel = row_start[width - 1];
- memset(row_start + width, right_pixel, PYRAMID_PADDING);
- }
-
- // Fill top area
- for (int row = -PYRAMID_PADDING; row < 0; row++) {
- uint8_t *row_start = &img_buf[row * stride];
- memcpy(row_start - PYRAMID_PADDING, img_buf - PYRAMID_PADDING,
- width + 2 * PYRAMID_PADDING);
- }
-
- // Fill bottom area
- uint8_t *last_row_start = &img_buf[(height - 1) * stride];
- for (int row = height; row < height + PYRAMID_PADDING; row++) {
- uint8_t *row_start = &img_buf[row * stride];
- memcpy(row_start - PYRAMID_PADDING, last_row_start - PYRAMID_PADDING,
- width + 2 * PYRAMID_PADDING);
- }
-}
-
-// Compute coarse to fine pyramids for a frame
-// This must only be called while holding frm_pyr->mutex
-static INLINE void fill_pyramid(const YV12_BUFFER_CONFIG *frm, int bit_depth,
- ImagePyramid *frm_pyr) {
- int n_levels = frm_pyr->n_levels;
- const int frm_width = frm->y_width;
- const int frm_height = frm->y_height;
- const int frm_stride = frm->y_stride;
- assert((frm_width >> n_levels) >= 0);
- assert((frm_height >> n_levels) >= 0);
-
- PyramidLayer *first_layer = &frm_pyr->layers[0];
- if (frm->flags & YV12_FLAG_HIGHBITDEPTH) {
- // For frames stored in a 16-bit buffer, we need to downconvert to 8 bits
- assert(first_layer->width == frm_width);
- assert(first_layer->height == frm_height);
-
- uint16_t *frm_buffer = CONVERT_TO_SHORTPTR(frm->y_buffer);
- uint8_t *pyr_buffer = first_layer->buffer;
- int pyr_stride = first_layer->stride;
- for (int y = 0; y < frm_height; y++) {
- uint16_t *frm_row = frm_buffer + y * frm_stride;
- uint8_t *pyr_row = pyr_buffer + y * pyr_stride;
- for (int x = 0; x < frm_width; x++) {
- pyr_row[x] = frm_row[x] >> (bit_depth - 8);
- }
- }
-
- fill_border(pyr_buffer, frm_width, frm_height, pyr_stride);
- } else {
- // For frames stored in an 8-bit buffer, we need to configure the first
- // pyramid layer to point at the original image buffer
- first_layer->buffer = frm->y_buffer;
- first_layer->width = frm_width;
- first_layer->height = frm_height;
- first_layer->stride = frm_stride;
- }
-
- // Fill in the remaining levels through progressive downsampling
- for (int level = 1; level < n_levels; ++level) {
- PyramidLayer *prev_layer = &frm_pyr->layers[level - 1];
- uint8_t *prev_buffer = prev_layer->buffer;
- int prev_stride = prev_layer->stride;
-
- PyramidLayer *this_layer = &frm_pyr->layers[level];
- uint8_t *this_buffer = this_layer->buffer;
- int this_width = this_layer->width;
- int this_height = this_layer->height;
- int this_stride = this_layer->stride;
-
-    // Compute this pyramid level by downsampling the previous level.
- //
- // We downsample by a factor of exactly 2, clipping the rightmost and
- // bottommost pixel off of the current level if needed. We do this for
- // two main reasons:
- //
- // 1) In the disflow code, when stepping from a higher pyramid level to a
- // lower pyramid level, we need to not just interpolate the flow field
- // but also to scale each flow vector by the upsampling ratio.
- // So it is much more convenient if this ratio is simply 2.
- //
- // 2) Up/downsampling by a factor of 2 can be implemented much more
- // efficiently than up/downsampling by a generic ratio.
- // TODO(rachelbarker): Use optimized downsample-by-2 function
- av1_resize_plane(prev_buffer, this_height << 1, this_width << 1,
- prev_stride, this_buffer, this_height, this_width,
- this_stride);
- fill_border(this_buffer, this_width, this_height, this_stride);
- }
-}
-
-// Fill out a downsampling pyramid for a given frame.
-//
-// The top level (index 0) will always be an 8-bit copy of the input frame,
-// regardless of the input bit depth. Additional levels are then downscaled
-// by powers of 2.
-//
-// For small input frames, the number of levels actually constructed
-// will be limited so that the smallest image is at least MIN_PYRAMID_SIZE
-// pixels along each side.
-//
-// However, if the input frame has a side of length < MIN_PYRAMID_SIZE,
-// we will still construct the top level.
-void aom_compute_pyramid(const YV12_BUFFER_CONFIG *frm, int bit_depth,
- ImagePyramid *pyr) {
- assert(pyr);
-
- // Per the comments in the ImagePyramid struct, we must take this mutex
- // before reading or writing the "valid" flag, and hold it while computing
- // the pyramid, to ensure proper behaviour if multiple threads call this
- // function simultaneously
-#if CONFIG_MULTITHREAD
- pthread_mutex_lock(&pyr->mutex);
-#endif // CONFIG_MULTITHREAD
-
- if (!pyr->valid) {
- fill_pyramid(frm, bit_depth, pyr);
- pyr->valid = true;
- }
-
- // At this point, the pyramid is guaranteed to be valid, and can be safely
- // read from without holding the mutex any more
-
-#if CONFIG_MULTITHREAD
- pthread_mutex_unlock(&pyr->mutex);
-#endif // CONFIG_MULTITHREAD
-}
-
-// Mark a pyramid as no longer containing valid data.
-// This must be done whenever the corresponding frame buffer is reused
-void aom_invalidate_pyramid(ImagePyramid *pyr) {
- if (pyr) {
-#if CONFIG_MULTITHREAD
- pthread_mutex_lock(&pyr->mutex);
-#endif // CONFIG_MULTITHREAD
- pyr->valid = false;
-#if CONFIG_MULTITHREAD
- pthread_mutex_unlock(&pyr->mutex);
-#endif // CONFIG_MULTITHREAD
- }
-}
-
-// Release the memory associated with a pyramid
-void aom_free_pyramid(ImagePyramid *pyr) {
- if (pyr) {
-#if CONFIG_MULTITHREAD
- pthread_mutex_destroy(&pyr->mutex);
-#endif // CONFIG_MULTITHREAD
- aom_free(pyr->buffer_alloc);
- aom_free(pyr->layers);
- aom_free(pyr);
- }
-}
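For reference, the alignment arithmetic in the file deleted above can be
checked with the defaults from pyramid.h (PYRAMID_PADDING = 16,
PYRAMID_ALIGNMENT = 32):

    size_t first_px_offset = (16 + 32 - 1) & ~(size_t)(32 - 1);  // = 32
    size_t extra_bytes = first_px_offset - 16;                   // = 16
    // The first image pixel then lands extra_bytes + PYRAMID_PADDING = 32
    // bytes into the buffer, a multiple of PYRAMID_ALIGNMENT, as intended.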
diff --git a/aom_dsp/pyramid.h b/aom_dsp/pyramid.h
deleted file mode 100644
index f38fcf8..0000000
--- a/aom_dsp/pyramid.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2022, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AOM_AOM_DSP_PYRAMID_H_
-#define AOM_AOM_DSP_PYRAMID_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdbool.h>
-
-#include "config/aom_config.h"
-
-#include "aom_scale/yv12config.h"
-#include "aom_util/aom_thread.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Minimum dimensions of a downsampled image
-#define MIN_PYRAMID_SIZE_LOG2 3
-#define MIN_PYRAMID_SIZE (1 << MIN_PYRAMID_SIZE_LOG2)
-
-// Size of border around each pyramid image, in pixels
-// Similarly to the border around regular image buffers, this border is filled
-// with copies of the outermost pixels of the frame, to allow for more efficient
-// convolution code
-// TODO(rachelbarker): How many pixels do we actually need here?
-// I think we only need 9 for disflow, but how many for corner matching?
-#define PYRAMID_PADDING 16
-
-// Byte alignment of each line within the image pyramids.
-// That is, the first pixel inside the image (ie, not in the border region),
-// on each row of each pyramid level, is aligned to this byte alignment.
-// This value must be a power of 2.
-#define PYRAMID_ALIGNMENT 32
-
-typedef struct {
- uint8_t *buffer;
- int width;
- int height;
- int stride;
-} PyramidLayer;
-
-// Struct for an image pyramid
-typedef struct image_pyramid {
-#if CONFIG_MULTITHREAD
- // Mutex which is used to prevent the pyramid being computed twice at the
- // same time
- //
- // Semantics:
- // * This mutex must be held whenever reading or writing the `valid` flag
- //
- // * This mutex must also be held while computing the image pyramid,
- // to ensure that only one thread may do so at a time.
- //
- // * However, once you have read the valid flag and seen a true value,
- // it is safe to drop the mutex and read from the remaining fields.
- // This is because, once the image pyramid is computed, its contents
- // will not be changed until the parent frame buffer is recycled,
- // which will not happen until there are no more outstanding references
- // to the frame buffer.
- pthread_mutex_t mutex;
-#endif
- // Flag indicating whether the pyramid contains valid data
- bool valid;
- // Number of allocated/filled levels in this pyramid
- int n_levels;
- // Pointer to allocated buffer
- uint8_t *buffer_alloc;
- // Data for each level
- // The `buffer` pointers inside this array point into the region which
- // is stored in the `buffer_alloc` field here
- PyramidLayer *layers;
-} ImagePyramid;
-
-size_t aom_get_pyramid_alloc_size(int width, int height, int n_levels,
- bool image_is_16bit);
-
-ImagePyramid *aom_alloc_pyramid(int width, int height, int n_levels,
- bool image_is_16bit);
-
-// Fill out a downsampling pyramid for a given frame.
-//
-// The top level (index 0) will always be an 8-bit copy of the input frame,
-// regardless of the input bit depth. Additional levels are then downscaled
-// by powers of 2.
-//
-// For small input frames, the number of levels actually constructed
-// will be limited so that the smallest image is at least MIN_PYRAMID_SIZE
-// pixels along each side.
-//
-// However, if the input frame has a side of length < MIN_PYRAMID_SIZE,
-// we will still construct the top level.
-void aom_compute_pyramid(const YV12_BUFFER_CONFIG *frm, int bit_depth,
- ImagePyramid *pyr);
-
-// Mark a pyramid as no longer containing valid data.
-// This must be done whenever the corresponding frame buffer is reused
-void aom_invalidate_pyramid(ImagePyramid *pyr);
-
-// Release the memory associated with a pyramid
-void aom_free_pyramid(ImagePyramid *pyr);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // AOM_AOM_DSP_PYRAMID_H_
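In outline, the locking contract documented in the deleted header is the
double-checked pattern used by aom_compute_pyramid() above: hold the mutex
while reading or writing `valid` and while filling the pyramid; once a true
`valid` has been observed, the layers may be read without the lock:

    pthread_mutex_lock(&pyr->mutex);
    if (!pyr->valid) {
      fill_pyramid(frm, bit_depth, pyr);  // compute all levels exactly once
      pyr->valid = true;
    }
    pthread_mutex_unlock(&pyr->mutex);
    // pyr->layers may now be read lock-free until the buffer is recycled.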
diff --git a/aom_scale/generic/yv12config.c b/aom_scale/generic/yv12config.c
index 8bd0d66..c1081e7 100644
--- a/aom_scale/generic/yv12config.c
+++ b/aom_scale/generic/yv12config.c
@@ -12,7 +12,6 @@
#include <assert.h>
#include "aom/internal/aom_image_internal.h"
-#include "aom_dsp/pyramid.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"
@@ -32,11 +31,7 @@
if (ybf->buffer_alloc_sz > 0) {
aom_free(ybf->buffer_alloc);
}
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- if (ybf->y_pyramid) {
- aom_free_pyramid(ybf->y_pyramid);
- }
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
+ if (ybf->y_buffer_8bit) aom_free(ybf->y_buffer_8bit);
aom_remove_metadata_from_frame_buffer(ybf);
/* buffer_alloc isn't accessed by most functions. Rather y_buffer,
u_buffer and v_buffer point to buffer_alloc and are used. Clear out
@@ -56,7 +51,7 @@
const uint64_t uvplane_size, const int aligned_width,
const int aligned_height, const int uv_width, const int uv_height,
const int uv_stride, const int uv_border_w, const int uv_border_h,
- int num_pyramid_levels, int alloc_y_plane_only) {
+ int alloc_y_buffer_8bit, int alloc_y_plane_only) {
if (ybf) {
const int aom_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
const uint64_t frame_size =
@@ -67,20 +62,14 @@
#if CONFIG_REALTIME_ONLY || !CONFIG_AV1_ENCODER
// We should only need an 8-bit version of the source frame if we are
// encoding in non-realtime mode
- (void)num_pyramid_levels;
- assert(num_pyramid_levels == 0);
+ assert(alloc_y_buffer_8bit == 0);
#endif // CONFIG_REALTIME_ONLY || !CONFIG_AV1_ENCODER
#if defined AOM_MAX_ALLOCABLE_MEMORY
// The size of ybf->buffer_alloc.
uint64_t alloc_size = frame_size;
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- // The size of ybf->y_pyramid
- if (num_pyramid_levels > 0) {
- alloc_size += aom_get_pyramid_alloc_size(
- aligned_width, aligned_height, num_pyramid_levels, use_highbitdepth);
- }
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
+ // The size of ybf->y_buffer_8bit.
+ if (use_highbitdepth && alloc_y_buffer_8bit) alloc_size += yplane_size;
// The decoder may allocate REF_FRAMES frame buffers in the frame buffer
// pool. Bound the total amount of allocated memory as if these REF_FRAMES
// frame buffers were allocated in a single allocation.
@@ -176,16 +165,17 @@
ybf->use_external_reference_buffers = 0;
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- if (ybf->y_pyramid) {
- aom_free_pyramid(ybf->y_pyramid);
- ybf->y_pyramid = NULL;
+ if (use_highbitdepth && alloc_y_buffer_8bit) {
+ if (ybf->y_buffer_8bit) aom_free(ybf->y_buffer_8bit);
+ ybf->y_buffer_8bit = (uint8_t *)aom_memalign(32, (size_t)yplane_size);
+ if (!ybf->y_buffer_8bit) return AOM_CODEC_MEM_ERROR;
+ } else {
+ if (ybf->y_buffer_8bit) {
+ aom_free(ybf->y_buffer_8bit);
+ ybf->y_buffer_8bit = NULL;
+ ybf->buf_8bit_valid = 0;
+ }
}
- if (num_pyramid_levels > 0) {
- ybf->y_pyramid = aom_alloc_pyramid(aligned_width, aligned_height,
- num_pyramid_levels, use_highbitdepth);
- }
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
ybf->corrupted = 0; /* assume not corrupted by errors */
return 0;
@@ -225,7 +215,7 @@
int border, int byte_alignment,
aom_codec_frame_buffer_t *fb,
aom_get_frame_buffer_cb_fn_t cb, void *cb_priv,
- int num_pyramid_levels, int alloc_y_plane_only) {
+ int alloc_y_buffer_8bit, int alloc_y_plane_only) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
return AOM_CODEC_MEM_ERROR;
@@ -252,7 +242,7 @@
ybf, width, height, ss_x, ss_y, use_highbitdepth, border,
byte_alignment, fb, cb, cb_priv, y_stride, yplane_size, uvplane_size,
aligned_width, aligned_height, uv_width, uv_height, uv_stride,
- uv_border_w, uv_border_h, num_pyramid_levels, alloc_y_plane_only);
+ uv_border_w, uv_border_h, alloc_y_buffer_8bit, alloc_y_plane_only);
}
return AOM_CODEC_MEM_ERROR;
}
diff --git a/aom_scale/yv12config.h b/aom_scale/yv12config.h
index 2d33bb4..581e923 100644
--- a/aom_scale/yv12config.h
+++ b/aom_scale/yv12config.h
@@ -32,10 +32,6 @@
#define AOM_ENC_ALLINTRA_BORDER 64
#define AOM_DEC_BORDER_IN_PIXELS 64
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
-struct image_pyramid;
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
-
/*!\endcond */
/*!
* \brief YV12 frame buffer data structure
@@ -94,11 +90,10 @@
// external reference frame is no longer used.
uint8_t *store_buf_adr[3];
- // Global motion search data
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- // 8-bit downsampling pyramid for the Y plane
- struct image_pyramid *y_pyramid;
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
+ // If the frame is stored in a 16-bit buffer, this stores an 8-bit version
+ // for use in global motion detection. It is allocated on-demand.
+ uint8_t *y_buffer_8bit;
+ int buf_8bit_valid;
uint8_t *buffer_alloc;
size_t buffer_alloc_sz;
@@ -135,21 +130,14 @@
// NULL, then libaom is using the frame buffer callbacks to handle memory.
// If cb is not NULL, libaom will call cb with minimum size in bytes needed
// to decode the current frame. If cb is NULL, libaom will allocate memory
-// internally to decode the current frame.
-//
-// If num_pyramid_levels > 0, then an image pyramid will be allocated with
-// the specified number of levels. This should be done for source and ref
-// frame buffers in the encoder, so that the pyramid can be used for global
-// motion estimation. In other contexts, num_pyramid_levels should be 0
-// to avoid allocating memory which will not be used.
-//
-// Returns 0 on success. Returns < 0 on failure.
+// internally to decode the current frame. Returns 0 on success. Returns < 0
+// on failure.
int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
int ss_x, int ss_y, int use_highbitdepth,
int border, int byte_alignment,
aom_codec_frame_buffer_t *fb,
aom_get_frame_buffer_cb_fn_t cb, void *cb_priv,
- int num_pyramid_levels, int alloc_y_plane_only);
+ int alloc_y_buffer_8bit, int alloc_y_plane_only);
int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index 4c39cf7..eb09f26 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -2947,7 +2947,7 @@
subsampling_x, subsampling_y, use_highbitdepth, lag_in_frames,
src_border_in_pixels, cpi->common.features.byte_alignment,
ctx->num_lap_buffers, (cpi->oxcf.kf_cfg.key_freq_max == 0),
- cpi->image_pyramid_levels);
+ cpi->oxcf.tool_cfg.enable_global_motion);
}
if (!ppi->lookahead)
aom_internal_error(&ppi->error, AOM_CODEC_MEM_ERROR,
diff --git a/av1/common/av1_common_int.h b/av1/common/av1_common_int.h
index d62f555..b4f5783 100644
--- a/av1/common/av1_common_int.h
+++ b/av1/common/av1_common_int.h
@@ -1132,9 +1132,7 @@
if (new_fb_idx == INVALID_IDX) return NULL;
cm->cur_frame = &cm->buffer_pool->frame_bufs[new_fb_idx];
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- aom_invalidate_pyramid(cm->cur_frame->buf.y_pyramid);
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
+ cm->cur_frame->buf.buf_8bit_valid = 0;
av1_zero(cm->cur_frame->interp_filter_selected);
return cm->cur_frame;
}
diff --git a/av1/common/resize.c b/av1/common/resize.c
index de891e8..242930c 100644
--- a/av1/common/resize.c
+++ b/av1/common/resize.c
@@ -1369,7 +1369,7 @@
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels) {
+ const bool alloc_y_buffer_8bit) {
// If scaling is performed for the sole purpose of calculating PSNR, then our
// target dimensions are superres upscaled width/height. Otherwise our target
// dimensions are coded width/height.
@@ -1389,7 +1389,7 @@
scaled, scaled_width, scaled_height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
border_in_pixels, cm->features.byte_alignment, NULL, NULL, NULL,
- num_pyramid_levels, 0))
+ alloc_y_buffer_8bit, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate scaled buffer");
@@ -1422,9 +1422,6 @@
#endif
return scaled;
} else {
-#if CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
- aom_invalidate_pyramid(unscaled->y_pyramid);
-#endif // CONFIG_AV1_ENCODER && !CONFIG_REALTIME_ONLY
return unscaled;
}
}
diff --git a/av1/common/resize.h b/av1/common/resize.h
index 9126777..4e8ee0f 100644
--- a/av1/common/resize.h
+++ b/av1/common/resize.h
@@ -75,7 +75,7 @@
AV1_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled,
const InterpFilter filter, const int phase, const bool use_optimized_scaler,
const bool for_psnr, const int border_in_pixels,
- const int num_pyramid_levels);
+ const bool alloc_y_buffer_8bit);
void av1_resize_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst, int bd,
diff --git a/av1/encoder/allintra_vis.c b/av1/encoder/allintra_vis.c
index 0d935a7..fca671c 100644
--- a/av1/encoder/allintra_vis.c
+++ b/av1/encoder/allintra_vis.c
@@ -447,7 +447,7 @@
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->oxcf.tool_cfg.enable_global_motion, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
cpi->norm_wiener_variance = 0;
diff --git a/av1/encoder/encode_strategy.c b/av1/encoder/encode_strategy.c
index a7f8d6a..afce0da 100644
--- a/av1/encoder/encode_strategy.c
+++ b/av1/encoder/encode_strategy.c
@@ -801,7 +801,7 @@
oxcf->frm_dim_cfg.height, cm->seq_params->subsampling_x,
cm->seq_params->subsampling_y, cm->seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0);
+ NULL, cpi->oxcf.tool_cfg.enable_global_motion, 0);
if (ret)
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate tf_buf_second_arf");
@@ -905,7 +905,8 @@
if (apply_filtering && is_psnr_calc_enabled(cpi)) {
cpi->source = av1_realloc_and_scale_if_required(
cm, source_buffer, &cpi->scaled_source, cm->features.interp_filter, 0,
- false, true, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, true, cpi->oxcf.border_in_pixels,
+ cpi->oxcf.tool_cfg.enable_global_motion);
cpi->unscaled_source = source_buffer;
}
#if CONFIG_COLLECT_COMPONENT_TIMING
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 8aef613..e5491d1 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -642,8 +642,6 @@
init_buffer_indices(&cpi->force_intpel_info, cm->remapped_ref_idx);
av1_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height);
-
- cpi->image_pyramid_levels = oxcf->tool_cfg.enable_global_motion ? 1 : 0;
}
void av1_change_config_seq(struct AV1_PRIMARY *ppi,
@@ -2174,7 +2172,7 @@
&cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
seq_params->subsampling_y, seq_params->use_highbitdepth,
cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
- NULL, cpi->image_pyramid_levels, 0))
+ NULL, cpi->oxcf.tool_cfg.enable_global_motion, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
@@ -2518,7 +2516,8 @@
cpi->source = av1_realloc_and_scale_if_required(
cm, unscaled, &cpi->scaled_source, filter_scaler, phase_scaler, true,
- false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, cpi->oxcf.border_in_pixels,
+ cpi->oxcf.tool_cfg.enable_global_motion);
if (frame_is_intra_only(cm) || resize_pending != 0) {
memset(cpi->consec_zero_mv, 0,
((cm->mi_params.mi_rows * cm->mi_params.mi_cols) >> 2) *
@@ -2529,7 +2528,7 @@
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source, filter_scaler,
phase_scaler, true, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->oxcf.tool_cfg.enable_global_motion);
}
if (cpi->sf.rt_sf.use_temporal_noise_estimate) {
@@ -2714,9 +2713,7 @@
cpi->sf.interp_sf.adaptive_interp_filter_search)
cpi->interp_search_flags.interp_filter_search_mask =
av1_setup_interp_filter_search_mask(cpi);
-#if !CONFIG_REALTIME_ONLY
- aom_invalidate_pyramid(cpi->source->y_pyramid);
-#endif // !CONFIG_REALTIME_ONLY
+ cpi->source->buf_8bit_valid = 0;
av1_setup_frame_size(cpi);
@@ -2791,7 +2788,8 @@
}
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, EIGHTTAP_REGULAR, 0,
- false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ false, false, cpi->oxcf.border_in_pixels,
+ cpi->oxcf.tool_cfg.enable_global_motion);
#if CONFIG_TUNE_BUTTERAUGLI
if (oxcf->tune_cfg.tuning == AOM_TUNE_BUTTERAUGLI) {
@@ -2811,7 +2809,7 @@
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
EIGHTTAP_REGULAR, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->oxcf.tool_cfg.enable_global_motion);
}
int scale_references = 0;
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 80bafc9..d13f08f 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -1950,9 +1950,9 @@
YV12_BUFFER_CONFIG *ref_buf[REF_FRAMES];
/*!
- * Pointer to the downsampling pyramid for the source frame.
+ * Pointer to the source frame buffer.
*/
- ImagePyramid *src_pyramid;
+ unsigned char *src_buffer;
/*!
* Holds the number of valid reference frames in past and future directions
@@ -3462,12 +3462,6 @@
* Block level thresholds to force zeromv-skip at partition level.
*/
unsigned int zeromv_skip_thresh_exit_part[BLOCK_SIZES_ALL];
-
- /*!
- * Number of downsampling pyramid levels to allocate for each frame
- * This is currently only used for global motion
- */
- int image_pyramid_levels;
} AV1_COMP;
/*!
diff --git a/av1/encoder/encoder_alloc.h b/av1/encoder/encoder_alloc.h
index 6b0f661..f4c345f 100644
--- a/av1/encoder/encoder_alloc.h
+++ b/av1/encoder/encoder_alloc.h
@@ -379,7 +379,7 @@
cm->seq_params->subsampling_x, cm->seq_params->subsampling_y,
cm->seq_params->use_highbitdepth, AOM_BORDER_IN_PIXELS,
cm->features.byte_alignment, NULL, NULL, NULL,
- cpi->image_pyramid_levels, 0))
+ cpi->oxcf.tool_cfg.enable_global_motion, 0))
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to reallocate scaled source buffer");
assert(cpi->scaled_source.y_crop_width == scaled_width);
diff --git a/av1/encoder/encoder_utils.c b/av1/encoder/encoder_utils.c
index 4f8fc0e..beb8f54 100644
--- a/av1/encoder/encoder_utils.c
+++ b/av1/encoder/encoder_utils.c
@@ -1029,12 +1029,13 @@
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels,
+ cpi->oxcf.tool_cfg.enable_global_motion);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->oxcf.tool_cfg.enable_global_motion);
}
av1_setup_frame(cpi);
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index f182a6b..518d77e 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -2278,7 +2278,7 @@
// Compute global motion for the given ref_buf_idx.
av1_compute_gm_for_valid_ref_frames(
cpi, gm_info->ref_buf, ref_buf_idx, gm_info->num_src_corners,
- gm_info->src_corners, gm_info->src_pyramid,
+ gm_info->src_corners, gm_info->src_buffer,
gm_thread_data->params_by_motion, gm_thread_data->segment_map,
gm_info->segment_map_w, gm_info->segment_map_h);
diff --git a/av1/encoder/global_motion_facade.c b/av1/encoder/global_motion_facade.c
index fa047c4..0df070a 100644
--- a/av1/encoder/global_motion_facade.c
+++ b/av1/encoder/global_motion_facade.c
@@ -13,12 +13,10 @@
#include "aom_dsp/flow_estimation/corner_detect.h"
#include "aom_dsp/flow_estimation/flow_estimation.h"
-#include "aom_dsp/pyramid.h"
#include "av1/common/warped_motion.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/ethread.h"
#include "av1/encoder/rdopt.h"
-#include "av1/encoder/global_motion_facade.h"
// Highest motion model to search.
#define GLOBAL_TRANS_TYPES_ENC 3
@@ -82,7 +80,7 @@
// different motion models and finds the best.
static AOM_INLINE void compute_global_motion_for_ref_frame(
AV1_COMP *cpi, YV12_BUFFER_CONFIG *ref_buf[REF_FRAMES], int frame,
- int num_src_corners, int *src_corners, const ImagePyramid *src_pyramid,
+ int num_src_corners, int *src_corners, unsigned char *src_buffer,
MotionModel *params_by_motion, uint8_t *segment_map,
const int segment_map_w, const int segment_map_h,
const WarpedMotionParams *ref_params) {
@@ -105,11 +103,6 @@
assert(ref_buf[frame] != NULL);
TransformationType model;
- int bit_depth = cpi->common.seq_params->bit_depth;
- YV12_BUFFER_CONFIG *ref = ref_buf[frame];
- ImagePyramid *ref_pyramid = ref->y_pyramid;
- aom_compute_pyramid(ref, bit_depth, ref_pyramid);
-
// TODO(sarahparker, debargha): Explore do_adaptive_gm_estimation = 1
const int do_adaptive_gm_estimation = 0;
@@ -130,10 +123,11 @@
params_by_motion[i].num_inliers = 0;
}
- aom_compute_global_motion(model, src_pyramid, src_corners, num_src_corners,
- ref_pyramid, gm_estimation_type,
- inliers_by_motion, params_by_motion,
- RANSAC_NUM_MOTIONS);
+ aom_compute_global_motion(model, src_buffer, src_width, src_height,
+ src_stride, src_corners, num_src_corners,
+ ref_buf[frame], cpi->common.seq_params->bit_depth,
+ gm_estimation_type, inliers_by_motion,
+ params_by_motion, RANSAC_NUM_MOTIONS);
int64_t ref_frame_error = 0;
for (i = 0; i < RANSAC_NUM_MOTIONS; ++i) {
if (inliers_by_motion[i] == 0) continue;
@@ -226,7 +220,7 @@
// Computes global motion for the given reference frame.
void av1_compute_gm_for_valid_ref_frames(
AV1_COMP *cpi, YV12_BUFFER_CONFIG *ref_buf[REF_FRAMES], int frame,
- int num_src_corners, int *src_corners, const ImagePyramid *src_pyramid,
+ int num_src_corners, int *src_corners, unsigned char *src_buffer,
MotionModel *params_by_motion, uint8_t *segment_map, int segment_map_w,
int segment_map_h) {
AV1_COMMON *const cm = &cpi->common;
@@ -235,7 +229,7 @@
: &default_warp_params;
compute_global_motion_for_ref_frame(
- cpi, ref_buf, frame, num_src_corners, src_corners, src_pyramid,
+ cpi, ref_buf, frame, num_src_corners, src_corners, src_buffer,
params_by_motion, segment_map, segment_map_w, segment_map_h, ref_params);
}
@@ -243,7 +237,7 @@
static AOM_INLINE void compute_global_motion_for_references(
AV1_COMP *cpi, YV12_BUFFER_CONFIG *ref_buf[REF_FRAMES],
FrameDistPair reference_frame[REF_FRAMES - 1], int num_ref_frames,
- int num_src_corners, int *src_corners, const ImagePyramid *src_pyramid,
+ int num_src_corners, int *src_corners, unsigned char *src_buffer,
MotionModel *params_by_motion, uint8_t *segment_map,
const int segment_map_w, const int segment_map_h) {
  // Corner detection for the source frame has already been done at this point.
@@ -254,7 +248,7 @@
for (int frame = 0; frame < num_ref_frames; frame++) {
int ref_frame = reference_frame[frame].frame;
av1_compute_gm_for_valid_ref_frames(
- cpi, ref_buf, ref_frame, num_src_corners, src_corners, src_pyramid,
+ cpi, ref_buf, ref_frame, num_src_corners, src_corners, src_buffer,
params_by_motion, segment_map, segment_map_w, segment_map_h);
// If global motion w.r.t. current ref frame is
// INVALID/TRANSLATION/IDENTITY, skip the evaluation of global motion w.r.t
@@ -420,6 +414,14 @@
GlobalMotionInfo *const gm_info = &cpi->gm_info;
YV12_BUFFER_CONFIG *source = cpi->source;
+ gm_info->src_buffer = source->y_buffer;
+ if (source->flags & YV12_FLAG_HIGHBITDEPTH) {
+ // The source buffer is 16-bit, so we need to convert to 8 bits for the
+ // following code. We cache the result until the source frame is released.
+ gm_info->src_buffer =
+ av1_downconvert_frame(source, cpi->common.seq_params->bit_depth);
+ }
+
gm_info->segment_map_w =
(source->y_width + WARP_ERROR_BLOCK) >> WARP_ERROR_BLOCK_LOG;
gm_info->segment_map_h =
@@ -447,14 +449,9 @@
// If at least one valid reference frame exists in past/future directions,
// compute interest points of source frame using FAST features.
if (gm_info->num_ref_frames[0] > 0 || gm_info->num_ref_frames[1] > 0) {
- aom_compute_pyramid(source, cpi->common.seq_params->bit_depth,
- source->y_pyramid);
- gm_info->src_pyramid = source->y_pyramid;
-
- PyramidLayer *layer0 = &source->y_pyramid->layers[0];
gm_info->num_src_corners = av1_fast_corner_detect(
- layer0->buffer, layer0->width, layer0->height, layer0->stride,
- gm_info->src_corners, MAX_CORNERS);
+ gm_info->src_buffer, source->y_width, source->y_height,
+ source->y_stride, gm_info->src_corners, MAX_CORNERS);
}
}
@@ -474,7 +471,7 @@
compute_global_motion_for_references(
cpi, gm_info->ref_buf, gm_info->reference_frames[dir],
gm_info->num_ref_frames[dir], gm_info->num_src_corners,
- gm_info->src_corners, gm_info->src_pyramid, params_by_motion,
+ gm_info->src_corners, gm_info->src_buffer, params_by_motion,
segment_map, gm_info->segment_map_w, gm_info->segment_map_h);
}
diff --git a/av1/encoder/global_motion_facade.h b/av1/encoder/global_motion_facade.h
index 7707831..52df19d 100644
--- a/av1/encoder/global_motion_facade.h
+++ b/av1/encoder/global_motion_facade.h
@@ -20,7 +20,7 @@
void av1_compute_gm_for_valid_ref_frames(
struct AV1_COMP *cpi, YV12_BUFFER_CONFIG *ref_buf[REF_FRAMES], int frame,
- int num_src_corners, int *src_corners, const ImagePyramid *src_pyramid,
+ int num_src_corners, int *src_corners, unsigned char *src_buffer,
MotionModel *params_by_motion, uint8_t *segment_map, int segment_map_w,
int segment_map_h);
void av1_compute_global_motion_facade(struct AV1_COMP *cpi);
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
index 337a92b..10fbb77 100644
--- a/av1/encoder/lookahead.c
+++ b/av1/encoder/lookahead.c
@@ -46,7 +46,7 @@
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels) {
+ bool is_all_intra, int enable_global_motion) {
int lag_in_frames = AOMMAX(1, depth);
// For all-intra frame encoding, previous source frames are not required.
@@ -82,7 +82,7 @@
if (aom_realloc_frame_buffer(
&ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
use_highbitdepth, border_in_pixels, byte_alignment, NULL, NULL,
- NULL, num_pyramid_levels, 0)) {
+ NULL, enable_global_motion, 0)) {
goto fail;
}
}
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
index e86ca4f..bd7cae4 100644
--- a/av1/encoder/lookahead.h
+++ b/av1/encoder/lookahead.h
@@ -70,7 +70,7 @@
unsigned int width, unsigned int height, unsigned int subsampling_x,
unsigned int subsampling_y, int use_highbitdepth, unsigned int depth,
const int border_in_pixels, int byte_alignment, int num_lap_buffers,
- bool is_all_intra, int num_pyramid_levels);
+ bool is_all_intra, int enable_global_motion);
/**\brief Destroys the lookahead stage
*/
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index 7540bd8..db2f098 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -1289,7 +1289,7 @@
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, cpi->oxcf.border_in_pixels,
cm->features.byte_alignment, NULL, NULL, NULL,
- cpi->image_pyramid_levels, 0);
+ cpi->oxcf.tool_cfg.enable_global_motion, 0);
if (ret) {
aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate tf_info");
diff --git a/av1/encoder/tune_butteraugli.c b/av1/encoder/tune_butteraugli.c
index b7f0722..2f057e1 100644
--- a/av1/encoder/tune_butteraugli.c
+++ b/av1/encoder/tune_butteraugli.c
@@ -264,12 +264,13 @@
cpi->source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_source, &cpi->scaled_source, cm->features.interp_filter,
- 0, false, false, cpi->oxcf.border_in_pixels, cpi->image_pyramid_levels);
+ 0, false, false, cpi->oxcf.border_in_pixels,
+ cpi->oxcf.tool_cfg.enable_global_motion);
if (cpi->unscaled_last_source != NULL) {
cpi->last_source = av1_realloc_and_scale_if_required(
cm, cpi->unscaled_last_source, &cpi->scaled_last_source,
cm->features.interp_filter, 0, false, false, cpi->oxcf.border_in_pixels,
- cpi->image_pyramid_levels);
+ cpi->oxcf.tool_cfg.enable_global_motion);
}
av1_setup_butteraugli_source(cpi);
diff --git a/av1/qmode_rc/ducky_encode.cc b/av1/qmode_rc/ducky_encode.cc
index 96ec803..7e0d691 100644
--- a/av1/qmode_rc/ducky_encode.cc
+++ b/av1/qmode_rc/ducky_encode.cc
@@ -263,7 +263,8 @@
seq_params->subsampling_x, seq_params->subsampling_y,
seq_params->use_highbitdepth, lag_in_frames, cpi->oxcf.border_in_pixels,
cpi->common.features.byte_alignment,
- /*num_lap_buffers=*/0, /*is_all_intra=*/0, cpi->image_pyramid_levels);
+ /*num_lap_buffers=*/0, /*is_all_intra=*/0,
+ cpi->oxcf.tool_cfg.enable_global_motion);
av1_tf_info_alloc(&cpi->ppi->tf_info, cpi);
assert(ppi->lookahead != nullptr);
diff --git a/test/corner_match_test.cc b/test/corner_match_test.cc
index 93ca8ec..673205a 100644
--- a/test/corner_match_test.cc
+++ b/test/corner_match_test.cc
@@ -27,9 +27,9 @@
using libaom_test::ACMRandom;
-typedef double (*ComputeCrossCorrFunc)(const unsigned char *im1, int stride1,
- int x1, int y1, const unsigned char *im2,
- int stride2, int x2, int y2);
+typedef double (*ComputeCrossCorrFunc)(unsigned char *im1, int stride1, int x1,
+ int y1, unsigned char *im2, int stride2,
+ int x2, int y2);
using std::make_tuple;
using std::tuple;