Initial implementation of the OBU-based high-level syntax (HLS) based on proposal R18

Change-Id: I78bb38e3d61d74442278bd5ed1c4f695b7c8e24a
diff --git a/av1/av1_dx_iface.c b/av1/av1_dx_iface.c
index a63be5f..799803e 100644
--- a/av1/av1_dx_iface.c
+++ b/av1/av1_dx_iface.c
@@ -153,6 +153,7 @@
   return AOM_CODEC_OK;
 }
 
+#if !CONFIG_OBU
 static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
                                               struct aom_read_bit_buffer *rb) {
   aom_color_space_t color_space;
@@ -200,6 +201,7 @@
   }
   return 1;
 }
+#endif
 
 static aom_codec_err_t decoder_peek_si_internal(
     const uint8_t *data, unsigned int data_sz, aom_codec_stream_info_t *si,
@@ -229,9 +231,19 @@
 
     data += index_size;
     data_sz -= index_size;
+#if CONFIG_OBU
+    if (data + data_sz <= data) return AOM_CODEC_INVALID_PARAM;
+#endif
   }
 
   {
+#if CONFIG_OBU
+    // TODO(OBU): proper fix needed -- parse the sequence/frame header OBUs
+    // here instead of hard-coding is_kf / intra_only / frame height.
+    si->is_kf = 1;
+    intra_only_flag = 1;
+    si->h = 1;
+
+#else
     int show_frame;
     int error_resilient;
     struct aom_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
@@ -298,6 +310,7 @@
         av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
       }
     }
+#endif
   }
   if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
   return AOM_CODEC_OK;
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index bc3bec3..5262d24 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -70,6 +70,10 @@
 typedef enum {
   KEY_FRAME = 0,
   INTER_FRAME = 1,
+#if CONFIG_OBU
+  INTRA_ONLY_FRAME = 2,  // replaces intra-only
+  S_FRAME = 3,
+#endif
   FRAME_TYPES,
 } FRAME_TYPE;
 
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 843e290..4465735 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -712,6 +712,18 @@
 } FILTER_DEGREE;
 #endif  // CONFIG_LPF_DIRECT
 
+#if CONFIG_OBU
+// R19
+typedef enum {
+  OBU_SEQUENCE_HEADER = 1,
+  OBU_TD = 2,
+  OBU_FRAME_HEADER = 3,
+  OBU_TILE_GROUP = 4,
+  OBU_METADATA = 5,
+  OBU_PADDING = 15,
+} OBU_TYPE;
+#endif
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 8492ad3..0447d52 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -3312,9 +3312,12 @@
   }
 #endif  // CONFIG_EXT_TILE
 
+// each tile group header is in its own tile group OBU
+#if !CONFIG_OBU
   // Store an index to the location of the tile group information
   pbi->tg_size_bit_offset = rb->bit_offset;
   read_tile_group_range(pbi, rb);
+#endif
 }
 
 static int mem_get_varsize(const uint8_t *src, int sz) {
@@ -3511,9 +3514,10 @@
   *data += size;
 }
 
-static void get_tile_buffers(
-    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
-    TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
+static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
+                             const uint8_t *data_end,
+                             TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
+                             int startTile, int endTile) {
   AV1_COMMON *const cm = &pbi->common;
   int r, c;
   const int tile_cols = cm->tile_cols;
@@ -3522,8 +3526,13 @@
   int first_tile_in_tg = 0;
   struct aom_read_bit_buffer rb_tg_hdr;
   uint8_t clear_data[MAX_AV1_HEADER_SIZE];
+#if !CONFIG_OBU
   const size_t hdr_size = pbi->uncomp_hdr_size + pbi->first_partition_size;
   const int tg_size_bit_offset = pbi->tg_size_bit_offset;
+#else
+  const int tg_size_bit_offset = 0;
+#endif
+
 #if CONFIG_DEPENDENT_HORZTILES
   int tile_group_start_col = 0;
   int tile_group_start_row = 0;
@@ -3532,8 +3541,15 @@
   for (r = 0; r < tile_rows; ++r) {
     for (c = 0; c < tile_cols; ++c, ++tc) {
       TileBufferDec *const buf = &tile_buffers[r][c];
+#if CONFIG_OBU
+      const int is_last = (tc == endTile);
+      const size_t hdr_offset = 0;
+#else
       const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
       const size_t hdr_offset = (tc && tc == first_tile_in_tg) ? hdr_size : 0;
+#endif
+
+      if (tc < startTile || tc > endTile) continue;
 
       if (data + hdr_offset >= data_end)
         aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
@@ -3621,7 +3637,8 @@
 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
 
 static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
-                                   const uint8_t *data_end) {
+                                   const uint8_t *data_end, int startTile,
+                                   int endTile) {
   AV1_COMMON *const cm = &pbi->common;
   const AVxWorkerInterface *const winterface = aom_get_worker_interface();
   const int tile_cols = cm->tile_cols;
@@ -3689,7 +3706,7 @@
     get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
   else
 #endif  // CONFIG_EXT_TILE
-    get_tile_buffers(pbi, data, data_end, tile_buffers);
+    get_tile_buffers(pbi, data, data_end, tile_buffers, startTile, endTile);
 
   if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
     aom_free(pbi->tile_data);
@@ -3708,6 +3725,10 @@
       const TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
       TileData *const td = pbi->tile_data + tile_cols * tile_row + tile_col;
 
+      if (tile_row * cm->tile_cols + tile_col < startTile ||
+          tile_row * cm->tile_cols + tile_col > endTile)
+        continue;
+
       td->cm = cm;
       td->xd = pbi->mb;
       td->xd.corrupted = 0;
@@ -3769,6 +3790,11 @@
     for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
       const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
       TileData *const td = pbi->tile_data + tile_cols * row + col;
+
+      if (tile_row * cm->tile_cols + tile_col < startTile ||
+          tile_row * cm->tile_cols + tile_col > endTile)
+        continue;
+
 #if CONFIG_ACCOUNTING
       if (pbi->acct_enabled) {
         td->bit_reader.accounting->last_tell_frac =
@@ -3831,7 +3857,9 @@
       }
     }
 
+#if !CONFIG_OBU
     assert(mi_row > 0);
+#endif
 
 // when Parallel deblocking is enabled, deblocking should not
 // be interleaved with decoding. Instead, deblocking should be done
@@ -3881,8 +3909,11 @@
                           cm->lf.filter_level_v, cm->lf.filter_level_v, 2, 0);
   }
 #else
-  av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
-                        cm->lf.filter_level, 0, 0);
+#if CONFIG_OBU
+  if (endTile == cm->tile_rows * cm->tile_cols - 1)
+#endif
+    av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+                          cm->lf.filter_level, 0, 0);
 #endif  // CONFIG_LOOPFILTER_LEVEL
 #else
 #if CONFIG_PARALLEL_DEBLOCKING
@@ -3926,11 +3957,16 @@
 #if CONFIG_ANS
     return data_end;
 #else
+#if !CONFIG_OBU
   {
     // Get last tile data.
     TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
     return aom_reader_find_end(&td->bit_reader);
   }
+#else
+  TileData *const td = pbi->tile_data + endTile;
+  return aom_reader_find_end(&td->bit_reader);
+#endif
 #endif  // CONFIG_ANS
 #if CONFIG_EXT_TILE
   }
@@ -4081,7 +4117,8 @@
     get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
   else
 #endif  // CONFIG_EXT_TILE
-    get_tile_buffers(pbi, data, data_end, tile_buffers);
+    get_tile_buffers(pbi, data, data_end, tile_buffers, 0,
+                     cm->tile_rows * cm->tile_cols - 1);
 
   for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
     // Sort the buffers in this tile row based on size in descending order.
@@ -4481,10 +4518,12 @@
   int i, mask, ref_index = 0;
   size_t sz;
 
+#if !CONFIG_OBU
 #if CONFIG_REFERENCE_BUFFER
   /* TODO: Move outside frame loop or inside key-frame branch */
   read_sequence_header(&pbi->seq_params);
 #endif
+#endif
 
   cm->last_frame_type = cm->frame_type;
   cm->last_intra_only = cm->intra_only;
@@ -4494,6 +4533,7 @@
   cm->is_reference_frame = 1;
 #endif  // CONFIG_EXT_REFS
 
+#if !CONFIG_OBU
   if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
     aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Invalid frame marker");
@@ -4506,6 +4546,7 @@
   if (cm->profile >= MAX_SUPPORTED_PROFILE)
     aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                        "Unsupported bitstream profile");
+#endif
 
 #if CONFIG_EXT_TILE
   cm->large_scale_tile = aom_rb_read_literal(rb, 1);
@@ -4559,7 +4600,12 @@
     return 0;
   }
 
+#if !CONFIG_OBU
   cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
+#else
+  cm->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2);  // 2 bits
+  cm->intra_only = cm->frame_type == INTRA_ONLY_FRAME;
+#endif
   cm->show_frame = aom_rb_read_bit(rb);
   cm->error_resilient_mode = aom_rb_read_bit(rb);
 #if CONFIG_REFERENCE_BUFFER
@@ -4608,8 +4654,9 @@
     if (!av1_read_sync_code(rb))
       aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                          "Invalid frame sync code");
-
+#if !CONFIG_OBU
     read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+#endif
     pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
 
     for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
@@ -4644,7 +4691,9 @@
     cm->use_prev_frame_mvs = 0;
 #endif
   } else {
+#if !CONFIG_OBU
     cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
+#endif
     if (cm->intra_only) cm->allow_screen_content_tools = aom_rb_read_bit(rb);
 #if CONFIG_TEMPMV_SIGNALING
     if (cm->intra_only || cm->error_resilient_mode) cm->use_prev_frame_mvs = 0;
@@ -4676,8 +4725,9 @@
       if (!av1_read_sync_code(rb))
         aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                            "Invalid frame sync code");
-
+#if !CONFIG_OBU
       read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+#endif
 
       pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
       setup_frame_size(cm, rb);
@@ -4689,7 +4739,13 @@
       cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8;
 #endif
     } else if (pbi->need_resync != 1) { /* Skip if need resync */
+#if CONFIG_OBU
+      pbi->refresh_frame_flags = (cm->frame_type == S_FRAME)
+                                     ? ~(1 << REF_FRAMES)
+                                     : aom_rb_read_literal(rb, REF_FRAMES);
+#else
       pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
+#endif
 
 #if CONFIG_EXT_REFS
       if (!pbi->refresh_frame_flags) {
@@ -4705,7 +4761,12 @@
         RefBuffer *const ref_frame = &cm->frame_refs[i];
         ref_frame->idx = idx;
         ref_frame->buf = &frame_bufs[idx].buf;
+#if CONFIG_OBU
+        cm->ref_frame_sign_bias[LAST_FRAME + i] =
+            (cm->frame_type == S_FRAME) ? 0 : aom_rb_read_bit(rb);
+#else
         cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
+#endif
 #if CONFIG_REFERENCE_BUFFER
         if (pbi->seq_params.frame_id_numbers_present_flag) {
           int frame_id_length = pbi->seq_params.frame_id_length_minus7 + 7;
@@ -5242,6 +5303,7 @@
   }
 }
 
+// This function is now obsolete
 void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
                       const uint8_t *data_end, const uint8_t **p_data_end) {
   AV1_COMMON *const cm = &pbi->common;
@@ -5454,7 +5516,8 @@
                          "Decode failed. Frame data is corrupted.");
     }
   } else {
-    *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
+    *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end, 0,
+                               cm->tile_rows * cm->tile_cols - 1);
   }
 
 #if CONFIG_CDEF
@@ -5532,3 +5595,460 @@
     cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
 #endif
 }
+
+// Reads and applies the uncompressed and compressed frame headers starting at
+// |data|, then performs all per-frame setup (frame buffer, mi grid, previous
+// frame MV wiring, entropy contexts) needed before tile decoding.
+// Returns the compressed-header (first partition) size in bytes, or 0 when
+// the header directly shows an existing frame; in that case *p_data_end is
+// set just past the consumed header bytes.
+size_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi, const uint8_t *data,
+                                          const uint8_t *data_end,
+                                          const uint8_t **p_data_end) {
+  AV1_COMMON *const cm = &pbi->common;
+  MACROBLOCKD *const xd = &pbi->mb;
+  struct aom_read_bit_buffer rb;
+  uint8_t clear_data[MAX_AV1_HEADER_SIZE];
+  size_t first_partition_size;
+  YV12_BUFFER_CONFIG *new_fb;
+#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
+  RefBuffer *last_fb_ref_buf = &cm->frame_refs[LAST_FRAME - LAST_FRAME];
+#endif  // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
+
+#if CONFIG_ADAPT_SCAN
+  av1_deliver_eob_threshold(cm, xd);
+#endif
+#if CONFIG_BITSTREAM_DEBUG
+  bitstream_queue_set_frame_read(cm->current_video_frame * 2 + cm->show_frame);
+#endif
+
+#if CONFIG_GLOBAL_MOTION
+  int i;
+  for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
+    set_default_warp_params(&cm->global_motion[i]);
+    set_default_warp_params(&cm->cur_frame->global_motion[i]);
+  }
+  xd->global_motion = cm->global_motion;
+#endif  // CONFIG_GLOBAL_MOTION
+
+  first_partition_size = read_uncompressed_header(
+      pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
+
+#if CONFIG_EXT_TILE
+  // If cm->single_tile_decoding = 0, the independent decoding of a single tile
+  // or a section of a frame is not allowed.
+  if (!cm->single_tile_decoding &&
+      (pbi->dec_tile_row >= 0 || pbi->dec_tile_col >= 0)) {
+    pbi->dec_tile_row = -1;
+    pbi->dec_tile_col = -1;
+  }
+#endif  // CONFIG_EXT_TILE
+
+  // Record both header sizes so later stages (tile group decoding) can locate
+  // the tile data that follows them.
+  pbi->first_partition_size = first_partition_size;
+  pbi->uncomp_hdr_size = aom_rb_bytes_read(&rb);
+  new_fb = get_frame_new_buffer(cm);
+  xd->cur_buf = new_fb;
+#if CONFIG_INTRABC
+#if CONFIG_HIGHBITDEPTH
+  av1_setup_scale_factors_for_frame(
+      &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
+      xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
+      cm->use_highbitdepth);
+#else
+  av1_setup_scale_factors_for_frame(
+      &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
+      xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
+#endif  // CONFIG_HIGHBITDEPTH
+#endif  // CONFIG_INTRABC
+
+  if (!first_partition_size) {
+    // showing a frame directly
+    *p_data_end = data + aom_rb_bytes_read(&rb);
+    return 0;
+  }
+
+  data += aom_rb_bytes_read(&rb);
+  if (!read_is_valid(data, first_partition_size, data_end))
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Truncated packet or corrupt header length");
+
+  cm->setup_mi(cm);
+
+#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
+  // NOTE(zoeliu): As cm->prev_frame can take neither a frame of
+  //               show_exisiting_frame=1, nor can it take a frame not used as
+  //               a reference, it is probable that by the time it is being
+  //               referred to, the frame buffer it originally points to may
+  //               already get expired and have been reassigned to the current
+  //               newly coded frame. Hence, we need to check whether this is
+  //               the case, and if yes, we have 2 choices:
+  //               (1) Simply disable the use of previous frame mvs; or
+  //               (2) Have cm->prev_frame point to one reference frame buffer,
+  //                   e.g. LAST_FRAME.
+  if (!dec_is_ref_frame_buf(pbi, cm->prev_frame)) {
+    // Reassign the LAST_FRAME buffer to cm->prev_frame.
+    cm->prev_frame = last_fb_ref_buf->idx != INVALID_IDX
+                         ? &cm->buffer_pool->frame_bufs[last_fb_ref_buf->idx]
+                         : NULL;
+  }
+#endif  // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
+
+#if CONFIG_TEMPMV_SIGNALING
+  if (cm->use_prev_frame_mvs && !frame_can_use_prev_frame_mvs(cm)) {
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Frame wrongly requests previous frame MVs");
+  }
+#else
+  // Previous-frame MVs are usable only when the previous frame was shown,
+  // inter coded, same-sized, and no error-resilient reset intervened.
+  cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame &&
+#if CONFIG_FRAME_SUPERRES
+                           cm->width == cm->last_width &&
+                           cm->height == cm->last_height &&
+#else
+                           cm->width == cm->prev_frame->buf.y_crop_width &&
+                           cm->height == cm->prev_frame->buf.y_crop_height &&
+#endif  // CONFIG_FRAME_SUPERRES
+                           !cm->last_intra_only && cm->last_show_frame &&
+                           (cm->last_frame_type != KEY_FRAME);
+#endif  // CONFIG_TEMPMV_SIGNALING
+
+#if CONFIG_MFMV
+  av1_setup_frame_buf_refs(cm);
+  av1_setup_motion_field(cm);
+#endif
+
+  av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+  if (cm->error_resilient_mode || frame_is_intra_only(cm)) {
+    // use the default frame context values
+    *cm->fc = cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
+    cm->pre_fc = &cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
+  } else {
+    *cm->fc = cm->frame_contexts[cm->frame_refs[0].idx];
+    cm->pre_fc = &cm->frame_contexts[cm->frame_refs[0].idx];
+  }
+#else
+  *cm->fc = cm->frame_contexts[cm->frame_context_idx];
+  cm->pre_fc = &cm->frame_contexts[cm->frame_context_idx];
+#endif  // CONFIG_NO_FRAME_CONTEXT_SIGNALING
+  if (!cm->fc->initialized)
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Uninitialized entropy context.");
+
+  av1_zero(cm->counts);
+
+  xd->corrupted = 0;
+  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
+  if (new_fb->corrupted)
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Decode failed. Frame data header is corrupted.");
+
+  return first_partition_size;
+}
+
+// Decodes tiles numbered [startTile, endTile] from |data| and, once the tile
+// group containing the frame's last tile has been decoded, runs the
+// post-tile stages: CDEF, superres, loop restoration and (for backward
+// adaptation) the entropy-context averaging/update. |initialize_flag| marks
+// the first tile group of the frame so frame-parallel context signalling is
+// done only once per frame.
+void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data,
+                                    const uint8_t *data_end,
+                                    const uint8_t **p_data_end, int startTile,
+                                    int endTile, int initialize_flag) {
+  AV1_COMMON *const cm = &pbi->common;
+  MACROBLOCKD *const xd = &pbi->mb;
+  int context_updated = 0;
+
+#if CONFIG_LOOP_RESTORATION
+  if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
+      cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
+      cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
+    av1_alloc_restoration_buffers(cm);
+  }
+#endif
+
+#if CONFIG_LOOPFILTER_LEVEL
+  if ((cm->lf.filter_level[0] || cm->lf.filter_level[1]) &&
+      !cm->skip_loop_filter) {
+    av1_loop_filter_frame_init(cm, cm->lf.filter_level[0],
+                               cm->lf.filter_level[1]);
+  }
+#else
+  if (cm->lf.filter_level && !cm->skip_loop_filter) {
+    av1_loop_filter_frame_init(cm, cm->lf.filter_level, cm->lf.filter_level);
+  }
+#endif
+
+  // If encoded in frame parallel mode, frame context is ready after decoding
+  // the frame header.
+  if (cm->frame_parallel_decode && initialize_flag &&
+      cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
+    AVxWorker *const worker = pbi->frame_worker_owner;
+    FrameWorkerData *const frame_worker_data = worker->data1;
+    if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
+      context_updated = 1;
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+      cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
+#else
+      cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+#endif  // CONFIG_NO_FRAME_CONTEXT_SIGNALING
+    }
+    av1_frameworker_lock_stats(worker);
+    pbi->cur_buf->row = -1;
+    pbi->cur_buf->col = -1;
+    frame_worker_data->frame_context_ready = 1;
+    // Signal the main thread that context is ready.
+    av1_frameworker_signal_stats(worker);
+    av1_frameworker_unlock_stats(worker);
+  }
+
+  dec_setup_frame_boundary_info(cm);
+
+  if (pbi->max_threads > 1 && !CONFIG_CB4X4 &&
+#if CONFIG_EXT_TILE
+      pbi->dec_tile_col < 0 &&  // Decoding all columns
+#endif                          // CONFIG_EXT_TILE
+      cm->tile_cols > 1) {
+    // Multi-threaded tile decoder
+    *p_data_end =
+        decode_tiles_mt(pbi, data + pbi->first_partition_size, data_end);
+    if (!xd->corrupted) {
+      if (!cm->skip_loop_filter) {
+// If multiple threads are used to decode tiles, then we use those
+// threads to do parallel loopfiltering.
+#if CONFIG_LOOPFILTER_LEVEL
+        av1_loop_filter_frame_mt(
+            (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, pbi->mb.plane,
+            cm->lf.filter_level[0], cm->lf.filter_level[1], 0, 0,
+            pbi->tile_workers, pbi->num_tile_workers, &pbi->lf_row_sync);
+#else
+        av1_loop_filter_frame_mt((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
+                                 pbi->mb.plane, cm->lf.filter_level, 0, 0,
+                                 pbi->tile_workers, pbi->num_tile_workers,
+                                 &pbi->lf_row_sync);
+#endif  // CONFIG_LOOPFILTER_LEVEL
+      }
+    } else {
+      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                         "Decode failed. Frame data is corrupted.");
+    }
+  } else {
+#if CONFIG_OBU
+    // In the OBU path |data| already points at the tile data.
+    *p_data_end = decode_tiles(pbi, data, data_end, startTile, endTile);
+#else
+    *p_data_end = decode_tiles(
+        pbi, data + pbi->uncomp_hdr_size + pbi->first_partition_size, data_end,
+        startTile, endTile);
+#endif
+  }
+
+  // Skip frame wrap-up until the tile group with the frame's last tile.
+  if (endTile != cm->tile_rows * cm->tile_cols - 1) {
+    return;
+  }
+
+#if CONFIG_CDEF
+  if (!cm->skip_loop_filter && !cm->all_lossless) {
+    av1_cdef_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+  }
+#endif  // CONFIG_CDEF
+
+#if CONFIG_FRAME_SUPERRES
+  superres_post_decode(pbi);
+#endif  // CONFIG_FRAME_SUPERRES
+
+#if CONFIG_LOOP_RESTORATION
+  if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
+      cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
+      cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
+    aom_extend_frame_borders((YV12_BUFFER_CONFIG *)xd->cur_buf);
+    av1_loop_restoration_frame((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
+                               cm->rst_info, 7, 0, NULL);
+  }
+#endif  // CONFIG_LOOP_RESTORATION
+
+  if (!xd->corrupted) {
+    if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+      FRAME_CONTEXT **tile_ctxs = aom_malloc(cm->tile_rows * cm->tile_cols *
+                                             sizeof(&pbi->tile_data[0].tctx));
+      aom_cdf_prob **cdf_ptrs =
+          aom_malloc(cm->tile_rows * cm->tile_cols *
+                     sizeof(&pbi->tile_data[0].tctx.partition_cdf[0][0]));
+      make_update_tile_list_dec(pbi, cm->tile_rows, cm->tile_cols, tile_ctxs);
+#if CONFIG_LV_MAP
+      av1_adapt_coef_probs(cm);
+#endif  // CONFIG_LV_MAP
+      av1_adapt_intra_frame_probs(cm);
+      av1_average_tile_coef_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
+                                 cm->tile_rows * cm->tile_cols);
+      av1_average_tile_intra_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
+                                  cm->tile_rows * cm->tile_cols);
+#if CONFIG_PVQ
+      av1_average_tile_pvq_cdfs(pbi->common.fc, tile_ctxs,
+                                cm->tile_rows * cm->tile_cols);
+#endif  // CONFIG_PVQ
+#if CONFIG_ADAPT_SCAN
+      av1_adapt_scan_order(cm);
+#endif  // CONFIG_ADAPT_SCAN
+
+      if (!frame_is_intra_only(cm)) {
+        av1_adapt_inter_frame_probs(cm);
+        av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+        av1_average_tile_inter_cdfs(&pbi->common, pbi->common.fc, tile_ctxs,
+                                    cdf_ptrs, cm->tile_rows * cm->tile_cols);
+        av1_average_tile_mv_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
+                                 cm->tile_rows * cm->tile_cols);
+      }
+      aom_free(tile_ctxs);
+      aom_free(cdf_ptrs);
+    } else {
+      debug_check_frame_counts(cm);
+    }
+  } else {
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Decode failed. Frame data is corrupted.");
+  }
+
+#if CONFIG_INSPECTION
+  if (pbi->inspect_cb != NULL) {
+    (*pbi->inspect_cb)(pbi, pbi->inspect_ctx);
+  }
+#endif
+
+// Non frame parallel update frame context here.
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+  if (!context_updated) cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
+#else
+  if (!cm->error_resilient_mode && !context_updated)
+    cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+#endif
+}
+
+#if CONFIG_OBU
+
+// Parses a 1- or 2-byte OBU header from |rb|. Returns the OBU type and
+// writes the header size in bytes to |*header_size|. When the extension
+// flag is set, the extension byte's layer-id fields are read and discarded.
+static OBU_TYPE read_obu_header(struct aom_read_bit_buffer *rb,
+                                uint32_t *header_size) {
+  OBU_TYPE obu_type;
+  int obu_extension_flag;
+
+  *header_size = 1;
+
+  obu_type = (OBU_TYPE)aom_rb_read_literal(rb, 5);
+  aom_rb_read_literal(rb, 2);  // reserved
+  obu_extension_flag = aom_rb_read_bit(rb);
+  if (obu_extension_flag) {
+    *header_size += 1;
+    aom_rb_read_literal(rb, 3);  // temporal_id
+    aom_rb_read_literal(rb, 2);  // NOTE(review): presumably spatial id -- confirm against R19
+    aom_rb_read_literal(rb, 2);  // NOTE(review): presumably quality id -- confirm against R19
+    aom_rb_read_literal(rb, 1);  // reserved
+  }
+
+  return obu_type;
+}
+
+// A temporal delimiter OBU carries no payload, so its payload size is 0.
+// Note: "(void)" rather than "()" -- an empty list in a C definition
+// declares an unspecified parameter list, not a zero-parameter function.
+static uint32_t read_temporal_delimiter_obu(void) { return 0; }
+
+// Parses a sequence header OBU payload: profile, level, optional frame-id
+// number signalling, and bitdepth/colorspace/subsampling. Returns the
+// number of whole bytes consumed from |rb| (bit count rounded up).
+static uint32_t read_sequence_header_obu(AV1Decoder *pbi,
+                                         struct aom_read_bit_buffer *rb) {
+  AV1_COMMON *const cm = &pbi->common;
+  SequenceHeader *const seq_params = &pbi->seq_params;
+  uint32_t saved_bit_offset = rb->bit_offset;
+
+  cm->profile = av1_read_profile(rb);
+  aom_rb_read_literal(rb, 4);  // level
+
+  seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
+  if (seq_params->frame_id_numbers_present_flag) {
+    seq_params->frame_id_length_minus7 = aom_rb_read_literal(rb, 4);
+    seq_params->delta_frame_id_length_minus2 = aom_rb_read_literal(rb, 4);
+  }
+
+  read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+
+  // Round the consumed bit count up to whole bytes.
+  return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
+}
+
+// Reads a complete frame header (uncompressed + compressed parts) and does
+// the associated per-frame setup. Returns the combined byte size of the
+// uncompressed header and the compressed-header partition.
+static uint32_t read_frame_header_obu(AV1Decoder *pbi, const uint8_t *data,
+                                      const uint8_t *data_end,
+                                      const uint8_t **p_data_end) {
+  size_t header_size;
+
+  header_size =
+      av1_decode_frame_headers_and_setup(pbi, data, data_end, p_data_end);
+  return (uint32_t)(pbi->uncomp_hdr_size + header_size);
+}
+
+// Reads a tile group header: the indices of the first and last tile in the
+// group, each coded with log2_tile_rows + log2_tile_cols bits. Returns the
+// number of whole bytes consumed from |rb|.
+static uint32_t read_tile_group_header(AV1Decoder *pbi,
+                                       struct aom_read_bit_buffer *rb,
+                                       int *startTile, int *endTile) {
+  AV1_COMMON *const cm = &pbi->common;
+  uint32_t saved_bit_offset = rb->bit_offset;
+
+  *startTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
+  *endTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
+
+  return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
+}
+
+// Decodes a single tile group OBU: parses the tile group header, decodes
+// tiles [startTile, endTile], and runs frame wrap-up if this group contains
+// the frame's last tile. Sets *is_last_tg when the last tile of the frame
+// has been decoded. Returns the OBU payload size (header + tile data bytes).
+static uint32_t read_one_tile_group_obu(AV1Decoder *pbi,
+                                        struct aom_read_bit_buffer *rb,
+                                        int is_first_tg, const uint8_t *data,
+                                        const uint8_t *data_end,
+                                        const uint8_t **p_data_end,
+                                        int *is_last_tg) {
+  AV1_COMMON *const cm = &pbi->common;
+  int startTile, endTile;
+  uint32_t header_size, tg_payload_size;
+
+  header_size = read_tile_group_header(pbi, rb, &startTile, &endTile);
+  data += header_size;
+  av1_decode_tg_tiles_and_wrapup(pbi, data, data_end, p_data_end, startTile,
+                                 endTile, is_first_tg);
+  tg_payload_size = (uint32_t)(*p_data_end - data);
+
+  // TODO(shan):  For now, assume all tile groups received in order
+  *is_last_tg = endTile == cm->tile_rows * cm->tile_cols - 1;
+
+  return header_size + tg_payload_size;
+}
+
+// Decodes one frame from a series of length-prefixed OBUs. Each OBU is
+// preceded by a 4-byte little-endian size covering the OBU header plus its
+// payload. Decoding stops once the tile group containing the frame's last
+// tile has been processed, or when an error is recorded in cm->error.
+void av1_decode_frame_from_obus(struct AV1Decoder *pbi, const uint8_t *data,
+                                const uint8_t *data_end,
+                                const uint8_t **p_data_end) {
+  AV1_COMMON *const cm = &pbi->common;
+  int frame_decoding_finished = 0;
+  int is_first_tg_obu_received = 1;
+  int frame_header_received = 0;
+  uint32_t frame_header_size = 0;  // uint32_t: matches read_frame_header_obu()
+
+  // decode frame as a series of OBUs
+  while (!frame_decoding_finished && !cm->error.error_code) {
+    struct aom_read_bit_buffer rb;
+    uint8_t clear_data[80];
+    uint32_t obu_size, obu_header_size, obu_payload_size = 0;
+    OBU_TYPE obu_type;
+
+    init_read_bit_buffer(pbi, &rb, data + 4, data_end, clear_data);
+
+    // every obu is preceded by 4-byte size of obu (obu header + payload size)
+    // The obu size is only needed for tile group OBUs
+    obu_size = mem_get_le32(data);
+    obu_type = read_obu_header(&rb, &obu_header_size);
+    data += (4 + obu_header_size);
+
+    switch (obu_type) {
+      case OBU_TD: obu_payload_size = read_temporal_delimiter_obu(); break;
+      case OBU_SEQUENCE_HEADER:
+        obu_payload_size = read_sequence_header_obu(pbi, &rb);
+        break;
+      case OBU_FRAME_HEADER:
+        // Only decode first frame header received
+        if (!frame_header_received) {
+          frame_header_size = obu_payload_size =
+              read_frame_header_obu(pbi, data, data_end, p_data_end);
+          frame_header_received = 1;
+        } else {
+          // Redundant frame headers are skipped, but |data| must still be
+          // advanced past them.
+          obu_payload_size = frame_header_size;
+        }
+        if (cm->show_existing_frame) frame_decoding_finished = 1;
+        break;
+      case OBU_TILE_GROUP:
+        // The payload ends |obu_size| bytes past the OBU header start, i.e.
+        // obu_size - obu_header_size bytes past the current |data| position.
+        // (Using "obu_size - 1" is only correct for a 1-byte header and
+        // over-reads by one byte when the extension byte is present.)
+        obu_payload_size = read_one_tile_group_obu(
+            pbi, &rb, is_first_tg_obu_received, data,
+            data + obu_size - obu_header_size, p_data_end,
+            &frame_decoding_finished);
+        is_first_tg_obu_received = 0;
+        break;
+      default: break;
+    }
+    data += obu_payload_size;
+  }
+}
+#endif
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index a904658..7fa64de 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -29,8 +29,25 @@
                          int *height);
 BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
 
+// This function is now obsolete
 void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
                       const uint8_t *data_end, const uint8_t **p_data_end);
+size_t av1_decode_frame_headers_and_setup(struct AV1Decoder *pbi,
+                                          const uint8_t *data,
+                                          const uint8_t *data_end,
+                                          const uint8_t **p_data_end);
+
+void av1_decode_tg_tiles_and_wrapup(struct AV1Decoder *pbi, const uint8_t *data,
+                                    const uint8_t *data_end,
+                                    const uint8_t **p_data_end, int startTile,
+                                    int endTile, int initialize_flag);
+
+#if CONFIG_OBU
+// replaces av1_decode_frame
+void av1_decode_frame_from_obus(struct AV1Decoder *pbi, const uint8_t *data,
+                                const uint8_t *data_end,
+                                const uint8_t **p_data_end);
+#endif
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index f1a4668..d983a0f 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -379,7 +379,21 @@
   }
 
   cm->error.setjmp = 1;
+
+#if !CONFIG_OBU
+#if 0
+  // This function is now obsolete
   av1_decode_frame(pbi, source, source + size, psource);
+#endif
+#if 1
+  if (av1_decode_frame_headers_and_setup(pbi, source, source + size, psource)) {
+    av1_decode_tg_tiles_and_wrapup(pbi, source, source + size, psource, 0,
+                                   cm->tile_rows * cm->tile_cols - 1, 1);
+  }
+#endif
+#else
+  av1_decode_frame_from_obus(pbi, source, source + size, psource);
+#endif
 
   swap_frame_buffers(pbi);
 
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 07a28e4..6df3f2d 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -102,15 +102,22 @@
                                              aom_writer *const w, int plane,
                                              int rtile_idx);
 #endif  // CONFIG_LOOP_RESTORATION
-static void write_uncompressed_header(AV1_COMP *cpi,
-                                      struct aom_write_bit_buffer *wb);
+#if CONFIG_OBU
+static void write_uncompressed_header_obu(AV1_COMP *cpi,
+                                          struct aom_write_bit_buffer *wb);
+#else
+static void write_uncompressed_header_frame(AV1_COMP *cpi,
+                                            struct aom_write_bit_buffer *wb);
+#endif
+
 static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data);
+#if !CONFIG_OBU || CONFIG_EXT_TILE
 static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                        const uint32_t data_size, const uint32_t max_tile_size,
                        const uint32_t max_tile_col_size,
                        int *const tile_size_bytes,
                        int *const tile_col_size_bytes);
-
+#endif
 void av1_encode_token_init(void) {
 #if CONFIG_EXT_TX
   int s;
@@ -3849,6 +3856,7 @@
 }
 #endif  // CONFIG_EXT_TILE
 
+#if !CONFIG_OBU || CONFIG_EXT_TILE
 static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
                             unsigned int *max_tile_size,
                             unsigned int *max_tile_col_size) {
@@ -3981,7 +3989,7 @@
     }
   } else {
 #endif  // CONFIG_EXT_TILE
-    write_uncompressed_header(cpi, &wb);
+    write_uncompressed_header_frame(cpi, &wb);
 
 #if CONFIG_EXT_REFS
     if (cm->show_existing_frame) {
@@ -4155,6 +4163,7 @@
 #endif  // CONFIG_EXT_TILE
   return (uint32_t)total_size;
 }
+#endif
 
 static void write_render_size(const AV1_COMMON *cm,
                               struct aom_write_bit_buffer *wb) {
@@ -4436,8 +4445,9 @@
 }
 #endif
 
-static void write_uncompressed_header(AV1_COMP *cpi,
-                                      struct aom_write_bit_buffer *wb) {
+#if !CONFIG_OBU
+static void write_uncompressed_header_frame(AV1_COMP *cpi,
+                                            struct aom_write_bit_buffer *wb) {
   AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
@@ -4736,6 +4746,364 @@
   write_tile_info(cm, wb);
 }
 
+#else
+// New function based on HLS R18
+static void write_uncompressed_header_obu(AV1_COMP *cpi,
+                                          struct aom_write_bit_buffer *wb) {
+  AV1_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+
+#if CONFIG_EXT_TILE
+  aom_wb_write_literal(wb, cm->large_scale_tile, 1);
+#endif  // CONFIG_EXT_TILE
+
+#if CONFIG_EXT_REFS
+  // NOTE: By default all coded frames to be used as a reference
+  cm->is_reference_frame = 1;
+
+  if (cm->show_existing_frame) {
+    RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+    const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
+
+    if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
+      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
+                         "Buffer %d does not contain a reconstructed frame",
+                         frame_to_show);
+    }
+    ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
+
+    aom_wb_write_bit(wb, 1);  // show_existing_frame
+    aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
+
+#if CONFIG_REFERENCE_BUFFER
+    if (cpi->seq_params.frame_id_numbers_present_flag) {
+      int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
+      int display_frame_id = cm->ref_frame_id[cpi->existing_fb_idx_to_show];
+      aom_wb_write_literal(wb, display_frame_id, frame_id_len);
+      /* Add a zero byte to prevent emulation of superframe marker */
+    /* Same logic as when terminating the entropy coder */
+    /* Consider keeping this logic in only one place */
+      aom_wb_write_literal(wb, 0, 8);
+    }
+#endif
+
+    return;
+  } else {
+#endif  // CONFIG_EXT_REFS
+    aom_wb_write_bit(wb, 0);  // show_existing_frame
+#if CONFIG_EXT_REFS
+  }
+#endif  // CONFIG_EXT_REFS
+
+  cm->frame_type = cm->intra_only ? INTRA_ONLY_FRAME : cm->frame_type;
+  aom_wb_write_literal(wb, cm->frame_type, 2);
+
+  if (cm->intra_only) cm->frame_type = INTRA_ONLY_FRAME;
+
+  aom_wb_write_bit(wb, cm->show_frame);
+  aom_wb_write_bit(wb, cm->error_resilient_mode);
+
+#if CONFIG_REFERENCE_BUFFER
+  cm->invalid_delta_frame_id_minus1 = 0;
+  if (cpi->seq_params.frame_id_numbers_present_flag) {
+    int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
+    aom_wb_write_literal(wb, cm->current_frame_id, frame_id_len);
+  }
+#endif
+
+  if (cm->frame_type == KEY_FRAME) {
+    write_sync_code(wb);
+
+    write_frame_size(cm, wb);
+#if CONFIG_ANS && ANS_MAX_SYMBOLS
+    assert(cpi->common.ans_window_size_log2 >= 8);
+    assert(cpi->common.ans_window_size_log2 < 24);
+    aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
+#endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
+    aom_wb_write_bit(wb, cm->allow_screen_content_tools);
+#if CONFIG_AMVR
+    if (cm->allow_screen_content_tools) {
+      if (cm->seq_mv_precision_level == 2) {
+        aom_wb_write_bit(wb, 1);
+      } else {
+        aom_wb_write_bit(wb, 0);
+        aom_wb_write_bit(wb, cm->seq_mv_precision_level == 0);
+      }
+    }
+#endif
+  } else if (cm->frame_type == INTRA_ONLY_FRAME) {
+    if (cm->intra_only) aom_wb_write_bit(wb, cm->allow_screen_content_tools);
+#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
+    if (!cm->error_resilient_mode) {
+      if (cm->intra_only) {
+        aom_wb_write_bit(wb,
+                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
+      }
+    }
+#endif
+#if CONFIG_EXT_REFS
+    cpi->refresh_frame_mask = get_refresh_mask(cpi);
+#endif  // CONFIG_EXT_REFS
+
+    if (cm->intra_only) {
+      write_sync_code(wb);
+
+#if CONFIG_EXT_REFS
+      aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+#else
+      aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+#endif  // CONFIG_EXT_REFS
+      write_frame_size(cm, wb);
+
+#if CONFIG_ANS && ANS_MAX_SYMBOLS
+      assert(cpi->common.ans_window_size_log2 >= 8);
+      assert(cpi->common.ans_window_size_log2 < 24);
+      aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
+#endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
+    }
+  } else if (cm->frame_type == INTER_FRAME) {
+    MV_REFERENCE_FRAME ref_frame;
+
+#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
+    if (!cm->error_resilient_mode) {
+      aom_wb_write_bit(wb, cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
+      if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
+        aom_wb_write_bit(wb,
+                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
+    }
+#endif
+
+#if CONFIG_EXT_REFS
+    cpi->refresh_frame_mask = get_refresh_mask(cpi);
+    aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+#else
+    aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+    if (!cpi->refresh_frame_mask) {
+      // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
+      //       will not be used as a reference
+      cm->is_reference_frame = 0;
+    }
+#endif  // CONFIG_EXT_REFS
+
+    for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+      assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
+      aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+                           REF_FRAMES_LOG2);
+      aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
+#if CONFIG_REFERENCE_BUFFER
+      if (cpi->seq_params.frame_id_numbers_present_flag) {
+        int i = get_ref_frame_map_idx(cpi, ref_frame);
+        int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
+        int diff_len = cpi->seq_params.delta_frame_id_length_minus2 + 2;
+        int delta_frame_id_minus1 =
+            ((cm->current_frame_id - cm->ref_frame_id[i] +
+              (1 << frame_id_len)) %
+             (1 << frame_id_len)) -
+            1;
+        if (delta_frame_id_minus1 < 0 ||
+            delta_frame_id_minus1 >= (1 << diff_len))
+          cm->invalid_delta_frame_id_minus1 = 1;
+        aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
+      }
+#endif
+    }
+
+#if CONFIG_FRAME_SIZE
+    if (cm->error_resilient_mode == 0) {
+      write_frame_size_with_refs(cpi, wb);
+    } else {
+      write_frame_size(cm, wb);
+    }
+#else
+    write_frame_size_with_refs(cpi, wb);
+#endif
+
+#if CONFIG_AMVR
+    if (cm->seq_mv_precision_level == 2) {
+      aom_wb_write_bit(wb, cm->cur_frame_mv_precision_level == 0);
+    }
+#endif
+    aom_wb_write_bit(wb, cm->allow_high_precision_mv);
+
+    fix_interp_filter(cm, cpi->td.counts);
+    write_frame_interp_filter(cm->interp_filter, wb);
+#if CONFIG_TEMPMV_SIGNALING
+    if (frame_might_use_prev_frame_mvs(cm)) {
+      aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
+    }
+#endif
+  } else if (cm->frame_type == S_FRAME) {
+    MV_REFERENCE_FRAME ref_frame;
+
+#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
+    if (!cm->error_resilient_mode) {
+      aom_wb_write_bit(wb, cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
+      if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
+        aom_wb_write_bit(wb,
+                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
+    }
+#endif
+
+#if CONFIG_EXT_REFS
+    if (!cpi->refresh_frame_mask) {
+      // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
+      //       will not be used as a reference
+      cm->is_reference_frame = 0;
+    }
+#endif  // CONFIG_EXT_REFS
+
+    for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+      assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
+      aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+                           REF_FRAMES_LOG2);
+      assert(cm->ref_frame_sign_bias[ref_frame] == 0);
+#if CONFIG_REFERENCE_BUFFER
+      if (cpi->seq_params.frame_id_numbers_present_flag) {
+        int i = get_ref_frame_map_idx(cpi, ref_frame);
+        int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
+        int diff_len = cpi->seq_params.delta_frame_id_length_minus2 + 2;
+        int delta_frame_id_minus1 =
+            ((cm->current_frame_id - cm->ref_frame_id[i] +
+              (1 << frame_id_len)) %
+             (1 << frame_id_len)) -
+            1;
+        if (delta_frame_id_minus1 < 0 ||
+            delta_frame_id_minus1 >= (1 << diff_len))
+          cm->invalid_delta_frame_id_minus1 = 1;
+        aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
+      }
+#endif
+    }
+
+#if CONFIG_FRAME_SIZE
+    if (cm->error_resilient_mode == 0) {
+      write_frame_size_with_refs(cpi, wb);
+    } else {
+      write_frame_size(cm, wb);
+    }
+#else
+    write_frame_size_with_refs(cpi, wb);
+#endif
+
+    aom_wb_write_bit(wb, cm->allow_high_precision_mv);
+
+    fix_interp_filter(cm, cpi->td.counts);
+    write_frame_interp_filter(cm->interp_filter, wb);
+#if CONFIG_TEMPMV_SIGNALING
+    if (frame_might_use_prev_frame_mvs(cm)) {
+      aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
+    }
+#endif
+  }
+
+#if CONFIG_MFMV
+  if (cm->show_frame == 0) {
+    int arf_offset = AOMMIN(
+        (MAX_GF_INTERVAL - 1),
+        cpi->twopass.gf_group.arf_src_offset[cpi->twopass.gf_group.index]);
+#if CONFIG_EXT_REFS
+    int brf_offset =
+        cpi->twopass.gf_group.brf_src_offset[cpi->twopass.gf_group.index];
+
+    arf_offset = AOMMIN((MAX_GF_INTERVAL - 1), arf_offset + brf_offset);
+#endif
+    aom_wb_write_literal(wb, arf_offset, 4);
+  }
+#endif
+
+#if CONFIG_REFERENCE_BUFFER
+  cm->refresh_mask = cm->frame_type == KEY_FRAME ? 0xFF : get_refresh_mask(cpi);
+#endif
+
+  if (!cm->error_resilient_mode) {
+    aom_wb_write_bit(
+        wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
+  }
+#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
+  aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
+#endif
+  assert(cm->mib_size == mi_size_wide[cm->sb_size]);
+  assert(cm->mib_size == 1 << cm->mib_size_log2);
+#if CONFIG_EXT_PARTITION
+  assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
+  aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
+#else
+  assert(cm->sb_size == BLOCK_64X64);
+#endif  // CONFIG_EXT_PARTITION
+
+  encode_loopfilter(cm, wb);
+  encode_quantization(cm, wb);
+  encode_segmentation(cm, xd, wb);
+#if CONFIG_DELTA_Q
+  {
+    int i;
+    struct segmentation *const seg = &cm->seg;
+    int segment_quantizer_active = 0;
+    for (i = 0; i < MAX_SEGMENTS; i++) {
+      if (segfeature_active(seg, i, SEG_LVL_ALT_Q)) {
+        segment_quantizer_active = 1;
+      }
+    }
+
+    if (cm->delta_q_present_flag)
+      assert(segment_quantizer_active == 0 && cm->base_qindex > 0);
+    if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
+      aom_wb_write_bit(wb, cm->delta_q_present_flag);
+      if (cm->delta_q_present_flag) {
+        aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_q_res) - 1, 2);
+        xd->prev_qindex = cm->base_qindex;
+#if CONFIG_EXT_DELTA_Q
+        assert(seg->abs_delta == SEGMENT_DELTADATA);
+        aom_wb_write_bit(wb, cm->delta_lf_present_flag);
+        if (cm->delta_lf_present_flag) {
+          aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_lf_res) - 1, 2);
+          xd->prev_delta_lf_from_base = 0;
+        }
+#endif  // CONFIG_EXT_DELTA_Q
+      }
+    }
+  }
+#endif
+#if CONFIG_CDEF
+  if (!cm->all_lossless) {
+    encode_cdef(cm, wb);
+  }
+#endif
+#if CONFIG_LOOP_RESTORATION
+  encode_restoration_mode(cm, wb);
+#endif  // CONFIG_LOOP_RESTORATION
+  write_tx_mode(cm, &cm->tx_mode, wb);
+
+  if (cpi->allow_comp_inter_inter) {
+    const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
+#if !CONFIG_REF_ADAPT
+    const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
+#endif  // !CONFIG_REF_ADAPT
+
+    aom_wb_write_bit(wb, use_hybrid_pred);
+#if !CONFIG_REF_ADAPT
+    if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
+#endif  // !CONFIG_REF_ADAPT
+  }
+#if CONFIG_EXT_INTER
+  write_compound_tools(cm, wb);
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_EXT_TX
+  aom_wb_write_bit(wb, cm->reduced_tx_set_used);
+#endif  // CONFIG_EXT_TX
+
+#if CONFIG_GLOBAL_MOTION
+  if (!frame_is_intra_only(cm)) write_global_motion(cpi, wb);
+#endif  // CONFIG_GLOBAL_MOTION
+
+  write_tile_info(cm, wb);
+}
+#endif  // CONFIG_OBU
+
 static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
   AV1_COMMON *const cm = &cpi->common;
 #if CONFIG_SUPERTX
@@ -4887,6 +5255,7 @@
   return header_bc->pos;
 }
 
+#if !CONFIG_OBU || CONFIG_EXT_TILE
 static int choose_size_bytes(uint32_t size, int spare_msbs) {
   // Choose the number of bytes required to represent size, without
   // using the 'spare_msbs' number of most significant bits.
@@ -4916,6 +5285,7 @@
     default: assert(0 && "Invalid size"); break;
   }
 }
+
 static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                        const uint32_t data_size, const uint32_t max_tile_size,
                        const uint32_t max_tile_col_size,
@@ -5024,6 +5394,321 @@
     return wpos;
   }
 }
+#endif
+
+#if CONFIG_OBU
+static uint32_t write_obu_header(OBU_TYPE obu_type, int obu_extension,
+                                 uint8_t *const dst) {
+  struct aom_write_bit_buffer wb = { dst, 0 };
+  uint32_t size = 0;
+
+  aom_wb_write_literal(&wb, (int)obu_type, 5);
+  aom_wb_write_literal(&wb, 0, 2);
+  aom_wb_write_literal(&wb, obu_extension ? 1 : 0, 1);
+  if (obu_extension) {
+    aom_wb_write_literal(&wb, obu_extension & 0xFF, 8);
+  }
+
+  size = aom_wb_bytes_written(&wb);
+  return size;
+}
+
+static uint32_t write_temporal_delimiter_obu() { return 0; }
+
+static uint32_t write_sequence_header_obu(AV1_COMP *cpi, uint8_t *const dst) {
+  AV1_COMMON *const cm = &cpi->common;
+  SequenceHeader *const seq_params = &cpi->seq_params;
+  struct aom_write_bit_buffer wb = { dst, 0 };
+  uint32_t size = 0;
+
+  write_profile(cm->profile, &wb);
+
+  aom_wb_write_literal(&wb, 0, 4);
+
+  seq_params->frame_id_numbers_present_flag = FRAME_ID_NUMBERS_PRESENT_FLAG;
+  aom_wb_write_literal(&wb, seq_params->frame_id_numbers_present_flag, 1);
+  if (seq_params->frame_id_numbers_present_flag) {
+    seq_params->frame_id_length_minus7 = FRAME_ID_LENGTH_MINUS7;
+    seq_params->delta_frame_id_length_minus2 = DELTA_FRAME_ID_LENGTH_MINUS2;
+    aom_wb_write_literal(&wb, seq_params->frame_id_length_minus7, 4);
+    aom_wb_write_literal(&wb, seq_params->delta_frame_id_length_minus2, 4);
+  }
+
+  // color_config
+  write_bitdepth_colorspace_sampling(cm, &wb);
+
+  size = aom_wb_bytes_written(&wb);
+  return size;
+}
+
+static uint32_t write_frame_header_obu(AV1_COMP *cpi, uint8_t *const dst) {
+  AV1_COMMON *const cm = &cpi->common;
+  struct aom_write_bit_buffer wb = { dst, 0 };
+  struct aom_write_bit_buffer compr_hdr_len_wb;
+  uint32_t total_size = 0;
+  uint32_t compr_hdr_size, uncompressed_hdr_size;
+
+  write_uncompressed_header_obu(cpi, &wb);
+
+  if (cm->show_existing_frame) {
+    total_size = aom_wb_bytes_written(&wb);
+    return total_size;
+  }
+
+  // write the tile length code  (Always 4 bytes for now)
+  aom_wb_write_literal(&wb, 3, 2);
+
+  // placeholder for the compressed header length
+  compr_hdr_len_wb = wb;
+  aom_wb_write_literal(&wb, 0, 16);
+
+  uncompressed_hdr_size = aom_wb_bytes_written(&wb);
+  compr_hdr_size = write_compressed_header(cpi, dst + uncompressed_hdr_size);
+  aom_wb_overwrite_literal(&compr_hdr_len_wb, (int)(compr_hdr_size), 16);
+
+  total_size = uncompressed_hdr_size + compr_hdr_size;
+  return total_size;
+}
+
+static uint32_t write_tile_group_header(uint8_t *const dst, int startTile,
+                                        int endTile, int tiles_log2) {
+  struct aom_write_bit_buffer wb = { dst, 0 };
+  uint32_t size = 0;
+
+  aom_wb_write_literal(&wb, startTile, tiles_log2);
+  aom_wb_write_literal(&wb, endTile, tiles_log2);
+
+  size = aom_wb_bytes_written(&wb);
+  return size;
+}
+
+static uint32_t write_tiles_in_tg_obus(AV1_COMP *const cpi, uint8_t *const dst,
+                                       unsigned int *max_tile_size,
+                                       unsigned int *max_tile_col_size,
+                                       uint8_t *const frame_header_obu_location,
+                                       uint32_t frame_header_obu_size,
+                                       int insert_frame_header_obu_flag) {
+  const AV1_COMMON *const cm = &cpi->common;
+  aom_writer mode_bc;
+  int tile_row, tile_col;
+  TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
+  TileBufferEnc(*const tile_buffers)[MAX_TILE_COLS] = cpi->tile_buffers;
+  uint32_t total_size = 0;
+  const int tile_cols = cm->tile_cols;
+  const int tile_rows = cm->tile_rows;
+  unsigned int tile_size = 0;
+  const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
+  // Fixed size tile groups for the moment
+  const int num_tg_hdrs = cm->num_tg;
+  const int tg_size =
+#if CONFIG_EXT_TILE
+      (cm->large_scale_tile)
+          ? 1
+          :
+#endif  // CONFIG_EXT_TILE
+          (tile_rows * tile_cols + num_tg_hdrs - 1) / num_tg_hdrs;
+  int tile_count = 0;
+  int curr_tg_data_size = 0;
+  uint8_t *data = dst;
+  int new_tg = 1;
+#if CONFIG_EXT_TILE
+  const int have_tiles = tile_cols * tile_rows > 1;
+#endif
+
+  *max_tile_size = 0;
+  *max_tile_col_size = 0;
+
+#if CONFIG_EXT_TILE
+  if (cm->large_scale_tile) {
+    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+      TileInfo tile_info;
+      const int is_last_col = (tile_col == tile_cols - 1);
+      const uint32_t col_offset = total_size;
+
+      av1_tile_set_col(&tile_info, cm, tile_col);
+
+      // The last column does not have a column header
+      if (!is_last_col) total_size += 4;
+
+      for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
+        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
+        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
+        const int data_offset = have_tiles ? 4 : 0;
+        const int tile_idx = tile_row * tile_cols + tile_col;
+        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
+        av1_tile_set_row(&tile_info, cm, tile_row);
+
+        buf->data = dst + total_size;
+
+        // If CONFIG_EXT_TILE = 1, every tile in the row has a header,
+        // even for the last one, unless no tiling is used at all.
+        total_size += data_offset;
+        // Initialise tile context from the frame context
+        this_tile->tctx = *cm->fc;
+        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
+#if CONFIG_PVQ
+        cpi->td.mb.pvq_q = &this_tile->pvq_q;
+        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
+#endif  // CONFIG_PVQ
+#if CONFIG_ANS
+        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
+#endif
+        aom_start_encode(&mode_bc, buf->data + data_offset);
+        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
+        assert(tok == tok_end);
+        aom_stop_encode(&mode_bc);
+        tile_size = mode_bc.pos;
+#if CONFIG_PVQ
+        cpi->td.mb.pvq_q = NULL;
+#endif
+        buf->size = tile_size;
+
+        // Record the maximum tile size we see, so we can compact headers later.
+        *max_tile_size = AOMMAX(*max_tile_size, tile_size);
+
+        if (have_tiles) {
+          // tile header: size of this tile, or copy offset
+          uint32_t tile_header = tile_size;
+          const int tile_copy_mode =
+              ((AOMMAX(cm->tile_width, cm->tile_height) << MI_SIZE_LOG2) <= 256)
+                  ? 1
+                  : 0;
+
+          // If tile_copy_mode = 1, check if this tile is a copy tile.
+          // Very low chances to have copy tiles on the key frames, so don't
+          // search on key frames to reduce unnecessary search.
+          if (cm->frame_type != KEY_FRAME && tile_copy_mode) {
+            const int idendical_tile_offset =
+                find_identical_tile(tile_row, tile_col, tile_buffers);
+
+            if (idendical_tile_offset > 0) {
+              tile_size = 0;
+              tile_header = idendical_tile_offset | 0x80;
+              tile_header <<= 24;
+            }
+          }
+
+          mem_put_le32(buf->data, tile_header);
+        }
+
+        total_size += tile_size;
+      }
+
+      if (!is_last_col) {
+        uint32_t col_size = total_size - col_offset - 4;
+        mem_put_le32(dst + col_offset, col_size);
+
+        // If it is not final packing, record the maximum tile column size we
+        // see, otherwise, check if the tile size is out of the range.
+        *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
+      }
+    }
+  } else {
+#endif  // CONFIG_EXT_TILE
+
+    for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+      TileInfo tile_info;
+      const int is_last_row = (tile_row == tile_rows - 1);
+      av1_tile_set_row(&tile_info, cm, tile_row);
+
+      for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+        const int tile_idx = tile_row * tile_cols + tile_col;
+        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
+        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
+        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
+        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
+        const int is_last_col = (tile_col == tile_cols - 1);
+        const int is_last_tile = is_last_col && is_last_row;
+        int is_last_tile_in_tg = 0;
+
+        if (new_tg) {
+          if (insert_frame_header_obu_flag && tile_idx) {
+            // insert a copy of frame header OBU (including 4-byte size),
+            // except before the first tile group
+            data = dst + total_size;
+            memmove(data, frame_header_obu_location, frame_header_obu_size);
+            total_size += frame_header_obu_size;
+          }
+          data = dst + total_size;
+          // A new tile group begins at this tile.  Write the obu header and
+          // tile group header
+          curr_tg_data_size = write_obu_header(OBU_TILE_GROUP, 0, data + 4);
+          if (n_log2_tiles)
+            curr_tg_data_size += write_tile_group_header(
+                data + curr_tg_data_size + 4, tile_idx,
+                AOMMIN(tile_idx + tg_size - 1, tile_cols * tile_rows - 1),
+                n_log2_tiles);
+          total_size += curr_tg_data_size + 4;
+          new_tg = 0;
+          tile_count = 0;
+        }
+        tile_count++;
+        av1_tile_set_col(&tile_info, cm, tile_col);
+
+        if (tile_count == tg_size || tile_idx == (tile_cols * tile_rows - 1)) {
+          is_last_tile_in_tg = 1;
+          new_tg = 1;
+        } else {
+          is_last_tile_in_tg = 0;
+        }
+
+#if CONFIG_DEPENDENT_HORZTILES
+        av1_tile_set_tg_boundary(&tile_info, cm, tile_row, tile_col);
+#endif
+        buf->data = dst + total_size;
+
+        // The last tile of the tile group does not have a header.
+        if (!is_last_tile_in_tg) total_size += 4;
+
+        // Initialise tile context from the frame context
+        this_tile->tctx = *cm->fc;
+        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
+#if CONFIG_PVQ
+        cpi->td.mb.pvq_q = &this_tile->pvq_q;
+        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
+#endif  // CONFIG_PVQ
+#if CONFIG_ANS
+        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
+#endif  // CONFIG_ANS
+        aom_start_encode(&mode_bc, dst + total_size);
+        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
+#if !CONFIG_LV_MAP
+#if !CONFIG_PVQ
+        assert(tok == tok_end);
+#endif  // !CONFIG_PVQ
+#endif  // !CONFIG_LV_MAP
+        aom_stop_encode(&mode_bc);
+        tile_size = mode_bc.pos;
+#if CONFIG_PVQ
+        cpi->td.mb.pvq_q = NULL;
+#endif
+        assert(tile_size > 0);
+
+        curr_tg_data_size += (tile_size + (is_last_tile_in_tg ? 0 : 4));
+        buf->size = tile_size;
+
+        if (!is_last_tile) {
+          *max_tile_size = AOMMAX(*max_tile_size, tile_size);
+        }
+        if (!is_last_tile_in_tg) {
+          // size of this tile
+          mem_put_le32(buf->data, tile_size);
+        } else {
+          // write current tile group size
+          mem_put_le32(data, curr_tg_data_size);
+        }
+
+        total_size += tile_size;
+      }
+    }
+#if CONFIG_EXT_TILE
+  }
+#endif  // CONFIG_EXT_TILE
+  return (uint32_t)total_size;
+}
+
+#endif
 
 void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
   uint8_t *data = dst;
@@ -5040,15 +5725,59 @@
 #endif  // CONFIG_EXT_TILE
   unsigned int max_tile_size;
   unsigned int max_tile_col_size;
+#if CONFIG_OBU
+#if !CONFIG_EXT_TILE
+  AV1_COMMON *const cm = &cpi->common;
+#endif
+  uint32_t obu_size;
+  uint8_t *frame_header_location;
+  uint32_t frame_header_size;
+#endif
 
 #if CONFIG_BITSTREAM_DEBUG
   bitstream_queue_reset_write();
 #endif
 
+#if CONFIG_OBU
+  // write temporal delimiter obu, preceded by 4-byte size
+  obu_size = write_obu_header(OBU_TD, 0, data + 4);
+  obu_size += write_temporal_delimiter_obu(/*data + 4 + obu_size*/);
+  mem_put_le32(data, obu_size);
+  data += obu_size + 4;
+
+  // write sequence header obu if KEY_FRAME, preceded by 4-byte size
+  if (cm->frame_type == KEY_FRAME) {
+    obu_size = write_obu_header(OBU_SEQUENCE_HEADER, 0, data + 4);
+    obu_size += write_sequence_header_obu(cpi, data + 4 + obu_size);
+    mem_put_le32(data, obu_size);
+    data += obu_size + 4;
+  }
+
+  // write frame header obu, preceded by 4-byte size
+  frame_header_location = data + 4;
+  obu_size = write_obu_header(OBU_FRAME_HEADER, 0, frame_header_location);
+  frame_header_size = write_frame_header_obu(cpi, data + 4 + obu_size);
+  obu_size += frame_header_size;
+  mem_put_le32(data, obu_size);
+  data += obu_size + 4;
+
+  if (cm->show_existing_frame) {
+    data_size = 0;
+  } else {
+    //  Each tile group obu will be preceded by 4-byte size of the tile group
+    //  obu
+    data_size =
+        write_tiles_in_tg_obus(cpi, data, &max_tile_size, &max_tile_col_size,
+                               frame_header_location - 4, obu_size + 4,
+                               1 /* cm->error_resilient_mode */);
+  }
+
+#endif
+
 #if CONFIG_EXT_TILE
   if (cm->large_scale_tile) {
     // Write the uncompressed header
-    write_uncompressed_header(cpi, &wb);
+    write_uncompressed_header_frame(cpi, &wb);
 
 #if CONFIG_EXT_REFS
     if (cm->show_existing_frame) {
@@ -5085,7 +5814,9 @@
     data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
   } else {
 #endif  // CONFIG_EXT_TILE
+#if !CONFIG_OBU
     data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
+#endif
 #if CONFIG_EXT_TILE
   }
 #endif  // CONFIG_EXT_TILE