Use g_bit_depth during input validation

The libaom encoder requires that the input frames the application
passes in for encoding have the same bit-depth as the codec bit-depth.
If the input bit-depth is lower than the codec bit-depth, the
application must upshift the frames before passing them in. The
application may communicate the actual input bit-depth via
g_input_bit_depth so that quality metrics are computed with reference
to the actual input.
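
For illustration, a minimal sketch of this usage (headers
<aom/aom_encoder.h> and <aom/aomcx.h> assumed; error handling and
buffer setup elided; src8, dst16 and width are hypothetical names,
not part of the API):

    aom_codec_enc_cfg_t cfg;
    aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg,
                                 AOM_USAGE_GOOD_QUALITY);
    cfg.g_bit_depth = AOM_BITS_10;  // codec operates at 10 bits
    cfg.g_input_bit_depth = 8;      // source is actually 8-bit
    aom_codec_ctx_t codec;
    aom_codec_enc_init(&codec, aom_codec_av1_cx(), &cfg,
                       AOM_CODEC_USE_HIGHBITDEPTH);

    // Upshift each 8-bit source row into a 16-bit-per-sample image
    // (e.g. AOM_IMG_FMT_I42016) before calling aom_codec_encode().
    for (int i = 0; i < width; ++i)
      dst16[i] = (uint16_t)(src8[i]) << (10 - 8);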

As the input is expected to have the same precision as the codec
bit-depth, this commit changes the input validator to check against
the codec bit-depth (g_bit_depth) instead of g_input_bit_depth.

Also update the API documentation to reflect these changes.

Bug: 503993976
Bug: 503987489

Change-Id: Id7d7aa60681462d58a905a63df9bc18c52afe49a
diff --git a/aom/aom_encoder.h b/aom/aom_encoder.h
index 9d412af..2e7f6bb 100644
--- a/aom/aom_encoder.h
+++ b/aom/aom_encoder.h
@@ -471,11 +471,15 @@
    */
   aom_bit_depth_t g_bit_depth;
 
-  /*!\brief Bit-depth of the input frames
+  /*!\brief Bit-depth of the input source
    *
-   * This value identifies the bit_depth of the input frames in bits.
-   * Note that the frames passed as input to the encoder must have
-   * this bit-depth.
+   * This value identifies the actual bit-depth of the input source in bits.
+   * Note that the frames passed as input to the encoder must match the codec
+   * bit-depth. If the source bit-depth is lower than the codec bit-depth, the
+   * application must upshift the frames to the codec bit-depth before passing
+   * them in for encoding. The library also uses this value to compute quality
+   * metrics at the source bit-depth, so the source bit-depth must not exceed
+   * the codec bit-depth.
    */
   unsigned int g_input_bit_depth;
 
diff --git a/av1/arg_defs.c b/av1/arg_defs.c
index b4025ee..2eb3b99 100644
--- a/av1/arg_defs.c
+++ b/av1/arg_defs.c
@@ -194,7 +194,8 @@
               "Display warnings, but do not prompt user to continue"),
   .bitdeptharg =
       ARG_DEF_ENUM("b", "bit-depth", 1, "Bit depth for codec", bitdepth_enum),
-  .inbitdeptharg = ARG_DEF(NULL, "input-bit-depth", 1, "Bit depth of input"),
+  .inbitdeptharg =
+      ARG_DEF(NULL, "input-bit-depth", 1, "Actual bit depth of input source"),
 
   .input_chroma_subsampling_x = ARG_DEF(NULL, "input-chroma-subsampling-x", 1,
                                         "Chroma subsampling x value"),
diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index 99402f6..324cecb 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -802,6 +802,8 @@
   RANGE_CHECK_HI(extra_cfg, cq_level, 63);
   RANGE_CHECK(cfg, g_bit_depth, AOM_BITS_8, AOM_BITS_12);
   RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
+  if (cfg->g_input_bit_depth > cfg->g_bit_depth)
+    ERROR("Input bit-depth must not exceed codec bit-depth");
   RANGE_CHECK(extra_cfg, content, AOM_CONTENT_DEFAULT, AOM_CONTENT_INVALID - 1);
 
   if (cfg->g_pass >= AOM_RC_SECOND_PASS) {
@@ -842,10 +844,6 @@
       cfg->g_bit_depth > AOM_BITS_10) {
     ERROR("Codec bit-depth 12 not supported in profile < 2");
   }
-  if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
-      cfg->g_input_bit_depth > 10) {
-    ERROR("Source bit-depth 12 not supported in profile < 2");
-  }
 
   if (cfg->rc_end_usage == AOM_Q) {
     RANGE_CHECK_HI(cfg, use_fixed_qp_offsets, 2);
@@ -1023,7 +1021,7 @@
 #if CONFIG_AV1_HIGHBITDEPTH
   if (ctx->extra_cfg.validate_hbd_input &&
       (img->fmt & AOM_IMG_FMT_HIGHBITDEPTH)) {
-    const unsigned int bit_depth = ctx->oxcf.input_cfg.input_bit_depth;
+    const unsigned int bit_depth = ctx->cfg.g_bit_depth;
     const int max_val = 1 << bit_depth;
     // Note there is no high bitdepth version of NV12 defined. If one is
     // added, `num_planes` should be 2 in that case.
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 0e38bb8..36510f0 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -768,7 +768,7 @@
 typedef struct {
   // Indicates the framerate of the input video.
   double init_framerate;
-  // Indicates the bit-depth of the input video.
+  // Indicates the actual bit-depth of the input video.
   unsigned int input_bit_depth;
   // Indicates the maximum number of frames to be encoded.
   unsigned int limit;
diff --git a/test/postproc_filters_test.cc b/test/postproc_filters_test.cc
index f907aef..4a61d2e 100644
--- a/test/postproc_filters_test.cc
+++ b/test/postproc_filters_test.cc
@@ -22,18 +22,14 @@
 
 namespace {
 
-class PostprocFiltersTest
-    : public ::libaom_test::CodecTestWith2Params<int, unsigned int>,
-      public ::libaom_test::EncoderTest {
+class PostprocFiltersTest : public ::libaom_test::CodecTestWithParam<int>,
+                            public ::libaom_test::EncoderTest {
  protected:
   PostprocFiltersTest()
       : EncoderTest(GET_PARAM(0)), set_skip_postproc_filtering_(false),
-        frame_number_(0), cpu_used_(GET_PARAM(1)), bd_(GET_PARAM(2)) {}
+        frame_number_(0), cpu_used_(GET_PARAM(1)) {}
 
-  void SetUp() override {
-    InitializeConfig(::libaom_test::kAllIntra);
-    cfg_.g_input_bit_depth = bd_;
-  }
+  void SetUp() override { InitializeConfig(::libaom_test::kAllIntra); }
 
   void PreEncodeFrameHook(::libaom_test::VideoSource *video,
                           ::libaom_test::Encoder *encoder) override {
@@ -121,7 +117,6 @@
   static constexpr int kFrames = 30;
   static constexpr unsigned int kCqLevel = 18;
   int cpu_used_;
-  unsigned int bd_;
 };
 
 class PostprocFiltersTestLarge : public PostprocFiltersTest {};
@@ -130,11 +125,9 @@
 
 TEST_P(PostprocFiltersTestLarge, MD5Match) { DoTest(); }
 
-AV1_INSTANTIATE_TEST_SUITE(PostprocFiltersTest, ::testing::Values(9),
-                           ::testing::Values(8, 10));
+AV1_INSTANTIATE_TEST_SUITE(PostprocFiltersTest, ::testing::Values(9));
 
 // Test cpu_used 3 and 6.
-AV1_INSTANTIATE_TEST_SUITE(PostprocFiltersTestLarge, ::testing::Values(3, 6),
-                           ::testing::Values(8, 10));
+AV1_INSTANTIATE_TEST_SUITE(PostprocFiltersTestLarge, ::testing::Values(3, 6));
 
 }  // namespace