Use type with smaller size

This is to reduce the size of the huge token buffer by more than 10%.

BUG=aomedia:940

Change-Id: I2ee7f7f62e8e8fb819cf7fba40a7ca7f860acaa4
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index fab8888..de1aa97 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -615,7 +615,7 @@
 
   while (p < stop && p->token != EOSB_TOKEN) {
     const int token = p->token;
-    const int eob_val = p->eob_val;
+    const int8_t eob_val = p->eob_val;
     if (token == BLOCK_Z_TOKEN) {
       aom_write_symbol(w, 0, *p->head_cdf, HEAD_TOKENS + 1);
       p++;
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index f060c48..2fd99b7 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -194,7 +194,7 @@
     const int sz = -(x < 0);
     const int band_cur = band_translate[i];
     const int ctx_cur = (i == 0) ? ctx : get_coef_context(nb, token_cache, i);
-    const int eob_val =
+    const int8_t eob_val =
         (i + 1 == eob) ? (i + 1 == seg_eob ? LAST_EOB : EARLY_EOB) : NO_EOB;
     const int is_first = (i == 0);
 
@@ -270,7 +270,7 @@
         int ctx_next;
         const int band_next = band_translate[i + 1];
         const int token_next = av1_get_token(qcoeff[scan[i + 1]]);
-        const int eob_val_next =
+        const int8_t eob_val_next =
             (i + 2 == eob) ? (i + 2 == seg_eob ? LAST_EOB : EARLY_EOB) : NO_EOB;
 
         token_cache[rc] = av1_pt_energy_class[t0];
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 7651a00..3bbbb64 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -453,7 +453,8 @@
 #endif
 }
 
-static INLINE int av1_get_coeff_token_cost(int token, int eob_val, int is_first,
+static INLINE int av1_get_coeff_token_cost(int token, int8_t eob_val,
+                                           int is_first,
                                            const int *head_cost_table,
                                            const int *tail_cost_table) {
   if (eob_val == LAST_EOB) return av1_cost_zero(128);
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 99a9bf3..6760e26 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -1614,7 +1614,7 @@
   int(*tail_token_costs)[COEFF_CONTEXTS][TAIL_TOKENS] =
       x->token_tail_costs[tx_size_ctx][type][ref];
   const int seg_eob = av1_get_tx_eob(&cm->seg, mbmi->segment_id, tx_size);
-  int eob_val;
+  int8_t eob_val;
 
 #if CONFIG_HIGHBITDEPTH
   const int cat6_bits = av1_get_cat6_extrabits_size(tx_size, xd->bd);
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index a5226e6..d66825b 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -305,7 +305,7 @@
 static INLINE void add_token(TOKENEXTRA **t,
                              aom_cdf_prob (*tail_cdf)[CDF_SIZE(ENTROPY_TOKENS)],
                              aom_cdf_prob (*head_cdf)[CDF_SIZE(ENTROPY_TOKENS)],
-                             int eob_val, int first_val, int32_t extra,
+                             int8_t eob_val, int8_t first_val, int32_t extra,
                              uint8_t token) {
   (*t)->token = token;
   (*t)->extra = extra;
@@ -478,8 +478,8 @@
   aom_cdf_prob(
       *const coef_tail_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] =
       ec_ctx->coef_tail_cdfs[txsize_sqr_map[tx_size]][type][ref];
-  int eob_val;
-  int first_val = 1;
+  int8_t eob_val;
+  int8_t first_val = 1;
   const int seg_eob = av1_get_tx_eob(&cpi->common.seg, segment_id, tx_size);
   const uint8_t *const band = get_band_translate(tx_size);
   int16_t token;
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index 92d5403..e01fe37 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -38,8 +38,9 @@
   aom_cdf_prob (*tail_cdf)[CDF_SIZE(ENTROPY_TOKENS)];
   aom_cdf_prob (*head_cdf)[CDF_SIZE(ENTROPY_TOKENS)];
   aom_cdf_prob *color_map_cdf;
-  int eob_val;
-  int first_val;
+  // TODO(yaowu): use packed enum type if appropriate
+  int8_t eob_val;
+  int8_t first_val;
   const aom_prob *context_tree;
   EXTRABIT extra;
   uint8_t token;