Removed B_MODE_INFO
Declared bmi in BLOCKD as a union b_mode_info instead of a B_MODE_INFO struct, then removed B_MODE_INFO entirely.
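
For reference, the per-block descriptor before and after this change looks roughly like the sketch below (reconstructed from the blockd.h hunk; B_PREDICTION_MODE and int_mv are the existing libvpx types). The commit treats the two fields as mutually exclusive: intra (B_PRED) blocks only need as_mode and SPLITMV blocks only need mv, so a union is sufficient and saves per-block storage.

    /* Old per-block struct, removed by this change. */
    typedef struct
    {
        B_PREDICTION_MODE mode;
        int_mv mv;
    } B_MODE_INFO;

    /* Replacement union (sketch): intra blocks write as_mode,
       SPLITMV blocks write mv; they are never needed at once. */
    union b_mode_info
    {
        B_PREDICTION_MODE as_mode;
        int_mv mv;
    };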
Change-Id: Ieb7469899e265892c66f7aeac87b7f2bf38e7a67
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 3a70b18..be99256 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -137,12 +137,6 @@
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
-typedef struct
-{
- B_PREDICTION_MODE mode;
- int_mv mv;
-} B_MODE_INFO;
-
union b_mode_info
{
B_PREDICTION_MODE as_mode;
@@ -182,8 +176,6 @@
short *dqcoeff;
unsigned char *predictor;
short *diff;
- short *reference;
-
short *dequant;
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
@@ -197,14 +189,13 @@
int eob;
- B_MODE_INFO bmi;
+ union b_mode_info bmi;
} BLOCKD;
typedef struct
{
DECLARE_ALIGNED(16, short, diff[400]); /* from idct diff */
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
-/* not used DECLARE_ALIGNED(16, short, reference[384]); */
DECLARE_ALIGNED(16, short, qcoeff[400]);
DECLARE_ALIGNED(16, short, dqcoeff[400]);
DECLARE_ALIGNED(16, char, eobs[25]);
@@ -284,19 +275,15 @@
static void update_blockd_bmi(MACROBLOCKD *xd)
{
int i;
- if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ int is_4x4;
+ is_4x4 = (xd->mode_info_context->mbmi.mode == SPLITMV) ||
+ (xd->mode_info_context->mbmi.mode == B_PRED);
+
+ if (is_4x4)
{
for (i = 0; i < 16; i++)
{
- BLOCKD *d = &xd->block[i];
- d->bmi.mv.as_int = xd->mode_info_context->bmi[i].mv.as_int;
- }
- }else if (xd->mode_info_context->mbmi.mode == B_PRED)
- {
- for (i = 0; i < 16; i++)
- {
- BLOCKD *d = &xd->block[i];
- d->bmi.mode = xd->mode_info_context->bmi[i].as_mode;
+ xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
}
}
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 5defa75..d569d88 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -355,7 +355,7 @@
do /* for each subset j */
{
int_mv leftmv, abovemv;
- B_MODE_INFO bmi;
+ int_mv blockmv;
int k; /* first block in subset j */
int mv_contz;
k = vp8_mbsplit_offset[s][j];
@@ -364,30 +364,30 @@
abovemv.as_int = above_block_mv(mi, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
- switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
+ switch ((B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
{
case NEW4X4:
- read_mv(bc, &bmi.mv.as_mv, (const MV_CONTEXT *) mvc);
- bmi.mv.as_mv.row += best_mv.as_mv.row;
- bmi.mv.as_mv.col += best_mv.as_mv.col;
+ read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
+ blockmv.as_mv.row += best_mv.as_mv.row;
+ blockmv.as_mv.col += best_mv.as_mv.col;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][3]++;
#endif
break;
case LEFT4X4:
- bmi.mv.as_int = leftmv.as_int;
+ blockmv.as_int = leftmv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][0]++;
#endif
break;
case ABOVE4X4:
- bmi.mv.as_int = abovemv.as_int;
+ blockmv.as_int = abovemv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][1]++;
#endif
break;
case ZERO4X4:
- bmi.mv.as_int = 0;
+ blockmv.as_int = 0;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][2]++;
#endif
@@ -396,7 +396,7 @@
break;
}
- mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&bmi.mv,
+ mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&blockmv,
mb_to_left_edge,
mb_to_right_edge,
mb_to_top_edge,
@@ -412,7 +412,7 @@
fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
do {
- mi->bmi[ *fill_offset].mv.as_int = bmi.mv.as_int;
+ mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
fill_offset++;
}while (--fill_count);
}
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 80e8723..fec5172 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -288,7 +288,7 @@
BLOCKD *b = &xd->block[i];
RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
- (b, b->bmi.mode, b->predictor);
+ (b, b->bmi.as_mode, b->predictor);
if (xd->eobs[i] > 1)
{
@@ -974,8 +974,6 @@
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
- vpx_memcpy(&xd->block[0].bmi, &xd->mode_info_context->bmi[0], sizeof(B_MODE_INFO));
-
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
{
diff --git a/vp8/decoder/threading.c b/vp8/decoder/threading.c
index 4ca53fb..77c3f17 100644
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -186,7 +186,9 @@
for (i = 0; i < 16; i++)
{
BLOCKD *b = &xd->block[i];
- vp8mt_predict_intra4x4(pbi, xd, b->bmi.mode, b->predictor, mb_row, mb_col, i);
+
+ vp8mt_predict_intra4x4(pbi, xd, b->bmi.as_mode, b->predictor, mb_row, mb_col, i);
+
if (xd->eobs[i] > 1)
{
DEQUANT_INVOKE(&pbi->dequant, idct_add)
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index b10f8c0..ced9635 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -1008,28 +1008,32 @@
do
{
- const B_MODE_INFO *const b = cpi->mb.partition_info->bmi + j;
+ B_PREDICTION_MODE blockmode;
+ int_mv blockmv;
const int *const L = vp8_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
+ blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmv = cpi->mb.partition_info->bmi[j].mv;
while (j != L[++k])
if (k >= 16)
assert(0);
+
leftmv.as_int = left_block_mv(m, k);
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
- write_sub_mv_ref(w, b->mode, vp8_sub_mv_ref_prob2 [mv_contz]); //pc->fc.sub_mv_ref_prob);
+ write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]);
- if (b->mode == NEW4X4)
+ if (blockmode == NEW4X4)
{
#ifdef ENTROPY_STATS
active_section = 11;
#endif
- write_mv(w, &b->mv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
+ write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
}
}
while (++j < cpi->mb.partition_info->count);
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index fbdc89e..bc6eeeb 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -54,7 +54,11 @@
typedef struct
{
int count;
- B_MODE_INFO bmi[16];
+ struct
+ {
+ B_PREDICTION_MODE mode;
+ int_mv mv;
+ } bmi[16];
} PARTITION_INFO;
typedef struct
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 1bb0260..f72fccc 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -272,6 +272,7 @@
// Activity masking based on Tim T's original code
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
+
unsigned int a;
unsigned int b;
unsigned int act = *(x->mb_activity_ptr);
@@ -477,24 +478,9 @@
x->mb_activity_ptr++;
x->mb_norm_activity_ptr++;
- if(cm->frame_type != INTRA_FRAME)
- {
- if (xd->mode_info_context->mbmi.mode != B_PRED)
- {
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
- }else
- {
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
- }
- }
- else
- {
- if(xd->mode_info_context->mbmi.mode != B_PRED)
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
- }
+ /* save the block info */
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i] = xd->block[i].bmi;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 835c80d..5da69bc 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -36,7 +36,7 @@
BLOCK *be = &x->block[ib];
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
- (b, b->bmi.mode, b->predictor);
+ (b, b->bmi.as_mode, b->predictor);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
@@ -89,19 +89,19 @@
switch (x->e_mbd.mode_info_context->mbmi.mode)
{
case DC_PRED:
- d->bmi.mode = B_DC_PRED;
+ d->bmi.as_mode = B_DC_PRED;
break;
case V_PRED:
- d->bmi.mode = B_VE_PRED;
+ d->bmi.as_mode = B_VE_PRED;
break;
case H_PRED:
- d->bmi.mode = B_HE_PRED;
+ d->bmi.as_mode = B_HE_PRED;
break;
case TM_PRED:
- d->bmi.mode = B_TM_PRED;
+ d->bmi.as_mode = B_TM_PRED;
break;
default:
- d->bmi.mode = B_DC_PRED;
+ d->bmi.as_mode = B_DC_PRED;
break;
}
}
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 1d92f20..3e6ed2a 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -232,23 +232,9 @@
x->mb_activity_ptr++;
x->mb_norm_activity_ptr++;
- if(cm->frame_type != INTRA_FRAME)
- {
- if (xd->mode_info_context->mbmi.mode != B_PRED)
- {
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
- }else
- {
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
- }
- }
- else {
- if(xd->mode_info_context->mbmi.mode != B_PRED)
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
- }
+ /* save the block info */
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i] = xd->block[i].bmi;
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 7cf5041..e9fc07d 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -100,7 +100,7 @@
{
for (i = 0; i < 16; i++)
{
- x->e_mbd.block[i].bmi.mode = B_DC_PRED;
+ x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
}
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 7bc3b52..a969084 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -47,7 +47,6 @@
extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
-extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
@@ -215,7 +214,8 @@
*best_mode = mode;
}
}
- b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
+
+ b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
vp8_encode_intra4x4block(rtcd, x, ib);
return best_rd;
}
@@ -251,7 +251,7 @@
cost += r;
distortion += d;
- mic->bmi[i].as_mode = xd->block[i].bmi.mode = best_mode;
+ mic->bmi[i].as_mode = best_mode;
// Break out case where we have already exceeded best so far value
// that was passed in
@@ -443,7 +443,7 @@
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
- B_MODE_INFO best_bmodes[16];
+ union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
int_mv best_ref_mv;
@@ -485,6 +485,7 @@
vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
vpx_memset(near_mv, 0, sizeof(near_mv));
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+ vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
// set up all the refframe dependent pointers.
@@ -885,7 +886,7 @@
if (this_mode == B_PRED)
for (i = 0; i < 16; i++)
{
- vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
+ best_bmodes[i].as_mode = x->e_mbd.block[i].bmi.as_mode;
}
// Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
@@ -953,10 +954,11 @@
}
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
+ {
for (i = 0; i < 16; i++)
{
- x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
+ x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
}
-
+ }
update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index bed2bb5..3ab8ea3 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -650,7 +650,7 @@
vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
}
}
- b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
+ b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -1398,8 +1398,7 @@
{
BLOCKD *bd = &x->e_mbd.block[i];
- bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
- bd->bmi.mode = bsi.modes[i];
+ bd->bmi.mv.as_int = bsi.mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
@@ -1714,7 +1713,7 @@
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
MACROBLOCKD *xd = &x->e_mbd;
- B_MODE_INFO best_bmodes[16];
+ union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
int_mv best_ref_mv;
@@ -1758,6 +1757,7 @@
unsigned char *v_buffer[4];
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+ vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
if (cpi->ref_frame_flags & VP8_LAST_FLAG)
{
@@ -2319,10 +2319,12 @@
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
- for (i = 0; i < 16; i++)
- {
- vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
- }
+ if ((this_mode == B_PRED) || (this_mode == SPLITMV))
+ for (i = 0; i < 16; i++)
+ {
+ best_bmodes[i] = x->e_mbd.block[i].bmi;
+ }
+
// Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
@@ -2396,7 +2398,7 @@
if (best_mbmode.mode == B_PRED)
{
for (i = 0; i < 16; i++)
- x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
+ x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
}
if (best_mbmode.mode == SPLITMV)