[CFL] Load luma as prediction for chroma

Loads the stored reconstructed luma pixels for each transform block
inside a prediction block. Supports 4:4:4 and 4:2:0 chroma subsampling
modes.

The CFL_CTX struct is now in cfl.h with appropriate forward declarations.

Change-Id: I44c117899414a10a8318d14ecaed402f803de97d
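
As an illustration of the approach (not code from this patch), loading the
luma pixels for 4:2:0 amounts to averaging each 2x2 block of the stored
reconstructed luma down to the chroma grid; for 4:4:4 the copy is 1:1. The
function and parameter names below are hypothetical:

    /* Hypothetical sketch, not taken from this change: downsample stored
     * luma to the chroma grid for 4:2:0 by averaging each 2x2 neighborhood
     * with rounding. */
    static void load_luma_420(const uint8_t *y_pix, int y_stride,
                              uint8_t *out, int out_stride,
                              int width, int height) {
      for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
          const uint8_t *top = y_pix + (2 * j) * y_stride + 2 * i;
          const uint8_t *bot = top + y_stride;
          /* Round-to-nearest average of the 2x2 luma block. */
          out[j * out_stride + i] =
              (uint8_t)((top[0] + top[1] + bot[0] + bot[1] + 2) >> 2);
        }
      }
    }

The per-block average of the loaded pixels then feeds cfl_predict_block,
which for now predicts alpha * average + DC_PRED with alpha still a
placeholder.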
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 8b80332..c53a3ff 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -31,7 +31,9 @@
 #include "av1/common/pvq_state.h"
 #include "av1/decoder/decint.h"
 #endif
-
+#if CONFIG_CFL
+#include "av1/common/cfl.h"
+#endif
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -552,19 +554,6 @@
 
 typedef int16_t EobThresholdMD[TX_SIZES_ALL][TX_TYPES];
 
-#if CONFIG_CFL
-typedef struct {
-  // Pixel buffer containing the luma pixels used as prediction for chroma
-  uint8_t y_pix[MAX_SB_SQUARE];
-
-  // Height and width of the luma prediction block currently in the pixel buffer
-  int y_height, y_width;
-
-  // CfL Performs its own block level DC_PRED for each chromatic plane
-  int dc_pred[CFL_PRED_PLANES];
-} CFL_CTX;
-#endif
-
 typedef struct macroblockd {
   struct macroblockd_plane plane[MAX_MB_PLANE];
   uint8_t bmode_blocks_wl;
diff --git a/av1/common/cfl.c b/av1/common/cfl.c
index 53e117e..b49890e 100644
--- a/av1/common/cfl.c
+++ b/av1/common/cfl.c
@@ -11,6 +11,21 @@
 
 #include "av1/common/cfl.h"
 #include "av1/common/common_data.h"
+#include "av1/common/onyxc_int.h"
+
+#include "aom/internal/aom_codec_internal.h"
+
+void cfl_init(CFL_CTX *cfl, AV1_COMMON *cm, int subsampling_x,
+              int subsampling_y) {
+  if (!((subsampling_x == 0 && subsampling_y == 0) ||
+        (subsampling_x == 1 && subsampling_y == 1))) {
+    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
+                       "Only 4:4:4 and 4:2:0 are currently supported by CfL");
+  }
+  memset(&cfl->y_pix, 0, sizeof(uint8_t) * MAX_SB_SQUARE);
+  cfl->subsampling_x = subsampling_x;
+  cfl->subsampling_y = subsampling_y;
+}
 
 // CfL computes its own block-level DC_PRED. This is required to compute both
 // alpha_cb and alpha_cr before the predictions are computed.
@@ -72,14 +87,19 @@
 
 // Predict the current transform block using CfL.
 // It is assumed that dst points at the start of the transform block.
-void cfl_predict_block(uint8_t *dst, int dst_stride, TX_SIZE tx_size,
-                       int dc_pred) {
+void cfl_predict_block(const CFL_CTX *cfl, uint8_t *dst, int dst_stride,
+                       int row, int col, TX_SIZE tx_size, int dc_pred) {
   const int tx_block_width = tx_size_wide[tx_size];
   const int tx_block_height = tx_size_high[tx_size];
 
+  // TODO(ltrudeau) implement alpha
+  // Place holder for alpha
+  const double alpha = 0;
+  const double y_avg = cfl_load(cfl, dst, dst_stride, row, col, tx_size);
+
   for (int j = 0; j < tx_block_height; j++) {
     for (int i = 0; i < tx_block_width; i++) {
-      dst[i] = dc_pred;
+      dst[i] = (uint8_t)(alpha * y_avg + dc_pred + 0.5);
     }
     dst += dst_stride;
   }
@@ -117,3 +137,102 @@
     cfl->y_height = OD_MAXI((row << tx_off_log2) + tx_height, cfl->y_height);
   }
 }
+
+// Load from the CfL pixel buffer into output
+double cfl_load(const CFL_CTX *cfl, uint8_t *output, int output_stride, int row,
+                int col, TX_SIZE tx_size) {
+  const int tx_width = tx_size_wide[tx_size];
+  const int tx_height = tx_size_high[tx_size];
+  const int sub_x = cfl->subsampling_x;
+  const int sub_y = cfl->subsampling_y;
+  const int tx_off_log2 = tx_size_wide_log2[0];
+
+  const uint8_t *y_pix;
+
+  int diff_width = 0;
+  int diff_height = 0;
+
+  int pred_row_offset = 0;
+  int output_row_offset = 0;
+  int top_left, bot_left;
+
+  // TODO(ltrudeau) add support for 4:2:2
+  if (sub_y == 0 && sub_x == 0) {
+    y_pix = &cfl->y_pix[(row * MAX_SB_SIZE + col) << tx_off_log2];
+    int uv_width = (col << tx_off_log2) + tx_width;
+    diff_width = uv_width - cfl->y_width;
+    int uv_height = (row << tx_off_log2) + tx_height;
+    diff_height = uv_height - cfl->y_height;
+    for (int j = 0; j < tx_height; j++) {
+      for (int i = 0; i < tx_width; i++) {
+        // In 4:4:4, pixels match 1 to 1
+        output[output_row_offset + i] = y_pix[pred_row_offset + i];
+      }
+      pred_row_offset += MAX_SB_SIZE;
+      output_row_offset += output_stride;
+    }
+  } else if (sub_y == 1 && sub_x == 1) {
+    y_pix = &cfl->y_pix[(row * MAX_SB_SIZE + col) << (tx_off_log2 + sub_y)];
+    int uv_width = ((col << tx_off_log2) + tx_width) << sub_x;
+    diff_width = (uv_width - cfl->y_width) >> sub_x;
+    int uv_height = ((row << tx_off_log2) + tx_height) << sub_y;
+    diff_height = (uv_height - cfl->y_height) >> sub_y;
+    for (int j = 0; j < tx_height; j++) {
+      for (int i = 0; i < tx_width; i++) {
+        top_left = (pred_row_offset + i) << sub_y;
+        bot_left = top_left + MAX_SB_SIZE;
+        // In 4:2:0, average pixels in 2x2 grid
+        output[output_row_offset + i] = OD_SHR_ROUND(
+            y_pix[top_left] + y_pix[top_left + 1]        // Top row
+                + y_pix[bot_left] + y_pix[bot_left + 1]  // Bottom row
+            ,
+            2);
+      }
+      pred_row_offset += MAX_SB_SIZE;
+      output_row_offset += output_stride;
+    }
+  } else {
+    assert(0);  // Unsupported chroma subsampling
+  }
+  // Due to frame boundary issues, it is possible that the total area
+  // covered by Chroma exceeds that of Luma. When this happens, we write over
+  // the broken data by repeating the last columns and/or rows.
+  //
+  // Note that in order to manage the case where both rows and columns
+  // overrun, we apply rows first. This way, when the rows overrun the
+  // bottom of the frame, the columns will be copied over them.
+  if (diff_width > 0) {
+    int last_pixel;
+    output_row_offset = tx_width - diff_width;
+
+    for (int j = 0; j < tx_height; j++) {
+      last_pixel = output_row_offset - 1;
+      for (int i = 0; i < diff_width; i++) {
+        output[output_row_offset + i] = output[last_pixel];
+      }
+      output_row_offset += output_stride;
+    }
+  }
+
+  if (diff_height > 0) {
+    output_row_offset = diff_height * output_stride;
+    const int last_row_offset = output_row_offset - output_stride;
+    for (int j = 0; j < diff_height; j++) {
+      for (int i = 0; i < tx_width; i++) {
+        output[output_row_offset + i] = output[last_row_offset + i];
+      }
+      output_row_offset += output_stride;
+    }
+  }
+
+  int avg = 0;
+  output_row_offset = 0;
+  for (int j = 0; j < tx_height; j++) {
+    for (int i = 0; i < tx_width; i++) {
+      avg += output[output_row_offset + i];
+    }
+    output_row_offset += output_stride;
+  }
+  return avg / (double)(tx_width * tx_height);
+}
diff --git a/av1/common/cfl.h b/av1/common/cfl.h
index 3edcb0f..371df70 100644
--- a/av1/common/cfl.h
+++ b/av1/common/cfl.h
@@ -12,15 +12,41 @@
 #ifndef AV1_COMMON_CFL_H_
 #define AV1_COMMON_CFL_H_
 
-#include "av1/common/blockd.h"
 #include "av1/common/enums.h"
 
+// Forward declaration of AV1_COMMON, in order to avoid creating a cyclic
+// dependency by importing av1/common/onyxc_int.h
+typedef struct AV1Common AV1_COMMON;
+
+// Forward declaration of MACROBLOCKD, in order to avoid creating a cyclic
+// dependency by importing av1/common/blockd.h
+typedef struct macroblockd MACROBLOCKD;
+
+typedef struct {
+  // Pixel buffer containing the luma pixels used as prediction for chroma
+  uint8_t y_pix[MAX_SB_SQUARE];
+
+  // Height and width of the luma prediction block currently in the pixel buffer
+  int y_height, y_width;
+
+  // Chroma subsampling
+  int subsampling_x, subsampling_y;
+
+  // CfL performs its own block-level DC_PRED for each chromatic plane
+  int dc_pred[CFL_PRED_PLANES];
+} CFL_CTX;
+
+void cfl_init(CFL_CTX *cfl, AV1_COMMON *cm, int subsampling_x,
+              int subsampling_y);
+
 void cfl_dc_pred(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
 
-void cfl_predict_block(uint8_t *dst, int dst_stride, TX_SIZE tx_size,
-                       int dc_pred);
+void cfl_predict_block(const CFL_CTX *cfl, uint8_t *dst, int dst_stride,
+                       int row, int col, TX_SIZE tx_size, int dc_pred);
 
 void cfl_store(CFL_CTX *cfl, const uint8_t *input, int input_stride, int row,
                int col, TX_SIZE tx_size);
 
+double cfl_load(const CFL_CTX *cfl, uint8_t *output, int output_stride, int row,
+                int col, TX_SIZE tx_size);
 #endif  // AV1_COMMON_CFL_H_
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index de52359..53a3800 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -35,6 +35,9 @@
 #if CONFIG_PVQ
 #include "av1/common/pvq.h"
 #endif
+#if CONFIG_CFL
+#include "av1/common/cfl.h"
+#endif
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -535,7 +538,8 @@
 #endif
 #if CONFIG_CFL
     xd->cfl = cfl;
-    memset(&cfl->y_pix, 0, sizeof(uint8_t) * MAX_SB_SQUARE);
+    cfl_init(cfl, cm, xd->plane[AOM_PLANE_U].subsampling_x,
+             xd->plane[AOM_PLANE_U].subsampling_y);
 #endif
     xd->above_context[i] = cm->above_context[i];
     if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 5477d6d..3d98895 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -2308,7 +2308,7 @@
       // is signaled.
       cfl_dc_pred(xd, get_plane_block_size(block_idx, pd), tx_size);
     }
-    cfl_predict_block(dst, pd->dst.stride, tx_size,
+    cfl_predict_block(xd->cfl, dst, pd->dst.stride, blk_row, blk_col, tx_size,
                       xd->cfl->dc_pred[plane - 1]);
   }
 #endif
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 35a9f57..0ddac92 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -5051,8 +5051,10 @@
 #endif  // #if CONFIG_EC_ADAPT
 
 #if CONFIG_CFL
-  td->mb.e_mbd.cfl = &this_tile->cfl;
-  memset(&this_tile->cfl.y_pix, 0, sizeof(uint8_t) * MAX_SB_SQUARE);
+  MACROBLOCKD *const xd = &td->mb.e_mbd;
+  xd->cfl = &this_tile->cfl;
+  cfl_init(xd->cfl, cm, xd->plane[AOM_PLANE_U].subsampling_x,
+           xd->plane[AOM_PLANE_U].subsampling_y);
 #endif
 
 #if CONFIG_PVQ