Merge "enable interleaved decoding of mode and mv" into experimental
diff --git a/configure b/configure
index 75b93f4..482657a 100755
--- a/configure
+++ b/configure
@@ -216,15 +216,14 @@
EXPERIMENT_LIST="
csm
featureupdates
- high_precision_mv
sixteenth_subpel_uv
comp_intra_pred
superblocks
pred_filter
lossless
hybridtransform
+ hybridtransform8x8
switchable_interp
- htrans8x8
tx16x16
"
CONFIG_LIST="
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 64fc065..2932fd4 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -54,7 +54,6 @@
#define PLANE_TYPE_UV 2
#define PLANE_TYPE_Y_WITH_DC 3
-
typedef char ENTROPY_CONTEXT;
typedef struct {
ENTROPY_CONTEXT y1[4];
@@ -179,6 +178,50 @@
B_MODE_COUNT
} B_PREDICTION_MODE;
+#if CONFIG_HYBRIDTRANSFORM8X8
+// convert MB_PREDICTION_MODE to B_PREDICTION_MODE
+static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
+ B_PREDICTION_MODE b_mode;
+ switch (mode) {
+ case DC_PRED:
+ b_mode = B_DC_PRED;
+ break;
+ case V_PRED:
+ b_mode = B_VE_PRED;
+ break;
+ case H_PRED:
+ b_mode = B_HE_PRED;
+ break;
+ case TM_PRED:
+ b_mode = B_TM_PRED;
+ break;
+ case D45_PRED:
+ b_mode = B_LD_PRED;
+ break;
+ case D135_PRED:
+ b_mode = B_RD_PRED;
+ break;
+ case D117_PRED:
+ b_mode = B_VR_PRED;
+ break;
+ case D153_PRED:
+ b_mode = B_HD_PRED;
+ break;
+ case D27_PRED:
+ b_mode = B_VL_PRED;
+ break;
+ case D63_PRED:
+ b_mode = B_HU_PRED;
+ break;
+ default :
+      // for debugging purposes; to be removed after full testing
+ assert(0);
+ break;
+ }
+ return b_mode;
+}
+#endif
+
#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
@@ -361,9 +404,7 @@
vp8_subpix_fn_t subpixel_predict_avg8x4;
vp8_subpix_fn_t subpixel_predict_avg8x8;
vp8_subpix_fn_t subpixel_predict_avg16x16;
-#if CONFIG_HIGH_PRECISION_MV
int allow_high_precision_mv;
-#endif /* CONFIG_HIGH_PRECISION_MV */
void *current_bc;
@@ -389,6 +430,32 @@
} MACROBLOCKD;
+#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM
+// transform mapping
+static void txfm_map(BLOCKD *b, B_PREDICTION_MODE bmode) {
+ switch (bmode) {
+ case B_TM_PRED :
+ case B_RD_PRED :
+ b->bmi.as_mode.tx_type = ADST_ADST;
+ break;
+
+ case B_VE_PRED :
+ case B_VR_PRED :
+ b->bmi.as_mode.tx_type = ADST_DCT;
+ break;
+
+ case B_HE_PRED :
+ case B_HD_PRED :
+ case B_HU_PRED :
+ b->bmi.as_mode.tx_type = DCT_ADST;
+ break;
+
+ default :
+ b->bmi.as_mode.tx_type = DCT_DCT;
+ break;
+ }
+}
+#endif
extern void vp8_build_block_doffsets(MACROBLOCKD *x);
extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
diff --git a/vp8/common/default_coef_probs.h b/vp8/common/default_coef_probs.h
index dfb0e5e..940e971 100644
--- a/vp8/common/default_coef_probs.h
+++ b/vp8/common/default_coef_probs.h
@@ -434,7 +434,7 @@
{ 6, 117, 180, 254, 199, 216, 255, 251, 128, 128, 128}
}
}
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
,
{ /* block Type 3 */
{ /* Coeff Band 0 */
diff --git a/vp8/common/entropy.h b/vp8/common/entropy.h
index 4497a3d..a435448 100644
--- a/vp8/common/entropy.h
+++ b/vp8/common/entropy.h
@@ -60,9 +60,9 @@
/* Coefficients are predicted via a 3-dimensional probability table. */
/* Outside dimension. 0 = Y no DC, 1 = Y2, 2 = UV, 3 = Y with DC */
-
#define BLOCK_TYPES 4
-#if CONFIG_HTRANS8X8
+
+#if CONFIG_HYBRIDTRANSFORM8X8
#define BLOCK_TYPES_8X8 4
#else
#define BLOCK_TYPES_8X8 3
@@ -100,7 +100,7 @@
#define SUBEXP_PARAM 4 /* Subexponential code parameter */
#define MODULUS_PARAM 13 /* Modulus parameter */
-#define COEFUPDATETYPE 1 /* coef update type to use (1/2/3) */
+#define COEFUPDATETYPE 1 /* coef update type to use (1/2) */
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
diff --git a/vp8/common/entropymv.c b/vp8/common/entropymv.c
index c1ea62d..e04922f 100644
--- a/vp8/common/entropymv.c
+++ b/vp8/common/entropymv.c
@@ -12,7 +12,6 @@
#include "onyxc_int.h"
#include "entropymv.h"
-#if CONFIG_HIGH_PRECISION_MV
const MV_CONTEXT_HP vp8_mv_update_probs_hp[2] = {
{{
237,
@@ -47,7 +46,6 @@
}
}
};
-#endif /* CONFIG_HIGH_PRECISION_MV */
const MV_CONTEXT vp8_mv_update_probs[2] = {
{{
@@ -84,7 +82,6 @@
}
};
-#if CONFIG_HIGH_PRECISION_MV
const vp8_tree_index vp8_small_mvtree_hp [30] = {
2, 16,
4, 10,
@@ -103,7 +100,6 @@
-14, -15
};
struct vp8_token_struct vp8_small_mvencodings_hp [16];
-#endif /* CONFIG_HIGH_PRECISION_MV */
const vp8_tree_index vp8_small_mvtree [14] = {
2, 8,
@@ -196,7 +192,6 @@
}
}
-#if CONFIG_HIGH_PRECISION_MV
static void compute_component_probs_hp(
const unsigned int events [MVvals_hp],
vp8_prob Pnew [MVPcount_hp],
@@ -268,13 +263,10 @@
while (++j < mvlong_width_hp);
}
}
-#endif /* CONFIG_HIGH_PRECISION_MV */
void vp8_entropy_mv_init() {
vp8_tokens_from_tree(vp8_small_mvencodings, vp8_small_mvtree);
-#if CONFIG_HIGH_PRECISION_MV
vp8_tokens_from_tree(vp8_small_mvencodings_hp, vp8_small_mvtree_hp);
-#endif
}
// #define MV_COUNT_TESTING
@@ -293,7 +285,6 @@
printf("},\n");
}
printf("};\n");
-#if CONFIG_HIGH_PRECISION_MV
printf("static const unsigned int\nMVcount_hp[2][MVvals_hp]={\n");
for (i = 0; i < 2; ++i) {
printf(" { ");
@@ -304,7 +295,6 @@
printf("},\n");
}
printf("};\n");
-#endif
#endif /* MV_COUNT_TESTING */
for (i = 0; i < 2; ++i) {
@@ -357,7 +347,6 @@
else cm->fc.mvc[i].prob[MVPbits + t] = prob;
}
}
-#if CONFIG_HIGH_PRECISION_MV
for (i = 0; i < 2; ++i) {
int prob;
unsigned int is_short_ct[2];
@@ -408,5 +397,4 @@
else cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = prob;
}
}
-#endif
}
diff --git a/vp8/common/entropymv.h b/vp8/common/entropymv.h
index 9141c2b..535d9b8 100644
--- a/vp8/common/entropymv.h
+++ b/vp8/common/entropymv.h
@@ -42,7 +42,6 @@
extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];
-#if CONFIG_HIGH_PRECISION_MV
enum {
mv_max_hp = 2047, /* max absolute value of a MV component */
MVvals_hp = (2 * mv_max_hp) + 1, /* # possible values "" */
@@ -69,14 +68,10 @@
extern const MV_CONTEXT_HP vp8_mv_update_probs_hp[2], vp8_default_mv_context_hp[2];
-#endif /* CONFIG_HIGH_PRECISION_MV */
-
extern const vp8_tree_index vp8_small_mvtree[];
extern struct vp8_token_struct vp8_small_mvencodings [8];
-#if CONFIG_HIGH_PRECISION_MV
extern const vp8_tree_index vp8_small_mvtree_hp[];
extern struct vp8_token_struct vp8_small_mvencodings_hp [16];
-#endif
void vp8_entropy_mv_init();
struct VP8Common;
diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c
index 8a67162..d35e2c4 100644
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -133,7 +133,6 @@
/* Make sure that the 1/8th bits of the Mvs are zero if high_precision
* is not being used, by truncating the last bit towards 0
*/
-#if CONFIG_HIGH_PRECISION_MV
if (!xd->allow_high_precision_mv) {
if (best_mv->as_mv.row & 1)
best_mv->as_mv.row += (best_mv->as_mv.row > 0 ? -1 : 1);
@@ -148,7 +147,6 @@
if (nearby->as_mv.col & 1)
nearby->as_mv.col += (nearby->as_mv.col > 0 ? -1 : 1);
}
-#endif
// TODO: move clamp outside findnearmv
vp8_clamp_mv2(nearest, xd);
diff --git a/vp8/common/idctllm.c b/vp8/common/idctllm.c
index e549fe0..616e493 100644
--- a/vp8/common/idctllm.c
+++ b/vp8/common/idctllm.c
@@ -35,6 +35,8 @@
static const int sinpi8sqrt2 = 35468;
static const int rounding = 0;
+// TODO: these transforms can be further converted into integer forms
+// for complexity optimization
#if CONFIG_HYBRIDTRANSFORM
float idct_4[16] = {
0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099,
@@ -51,11 +53,52 @@
};
#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+float idct_8[64] = {
+ 0.353553390593274, 0.490392640201615, 0.461939766255643, 0.415734806151273,
+ 0.353553390593274, 0.277785116509801, 0.191341716182545, 0.097545161008064,
+ 0.353553390593274, 0.415734806151273, 0.191341716182545, -0.097545161008064,
+ -0.353553390593274, -0.490392640201615, -0.461939766255643, -0.277785116509801,
+ 0.353553390593274, 0.277785116509801, -0.191341716182545, -0.490392640201615,
+ -0.353553390593274, 0.097545161008064, 0.461939766255643, 0.415734806151273,
+ 0.353553390593274, 0.097545161008064, -0.461939766255643, -0.277785116509801,
+ 0.353553390593274, 0.415734806151273, -0.191341716182545, -0.490392640201615,
+ 0.353553390593274, -0.097545161008064, -0.461939766255643, 0.277785116509801,
+ 0.353553390593274, -0.415734806151273, -0.191341716182545, 0.490392640201615,
+ 0.353553390593274, -0.277785116509801, -0.191341716182545, 0.490392640201615,
+ -0.353553390593274, -0.097545161008064, 0.461939766255643, -0.415734806151273,
+ 0.353553390593274, -0.415734806151273, 0.191341716182545, 0.097545161008064,
+ -0.353553390593274, 0.490392640201615, -0.461939766255643, 0.277785116509801,
+ 0.353553390593274, -0.490392640201615, 0.461939766255643, -0.415734806151273,
+ 0.353553390593274, -0.277785116509801, 0.191341716182545, -0.097545161008064
+};
+
+float iadst_8[64] = {
+ 0.089131608307533, 0.255357107325376, 0.387095214016349, 0.466553967085785,
+ 0.483002021635509, 0.434217976756762, 0.326790388032145, 0.175227946595735,
+ 0.175227946595735, 0.434217976756762, 0.466553967085785, 0.255357107325376,
+ -0.089131608307533, -0.387095214016348, -0.483002021635509, -0.326790388032145,
+ 0.255357107325376, 0.483002021635509, 0.175227946595735, -0.326790388032145,
+ -0.466553967085785, -0.089131608307533, 0.387095214016349, 0.434217976756762,
+ 0.326790388032145, 0.387095214016349, -0.255357107325376, -0.434217976756762,
+ 0.175227946595735, 0.466553967085786, -0.089131608307534, -0.483002021635509,
+ 0.387095214016349, 0.175227946595735, -0.483002021635509, 0.089131608307533,
+ 0.434217976756762, -0.326790388032145, -0.255357107325377, 0.466553967085785,
+ 0.434217976756762, -0.089131608307533, -0.326790388032145, 0.483002021635509,
+ -0.255357107325376, -0.175227946595735, 0.466553967085785, -0.387095214016348,
+ 0.466553967085785, -0.326790388032145, 0.089131608307533, 0.175227946595735,
+ -0.387095214016348, 0.483002021635509, -0.434217976756762, 0.255357107325376,
+ 0.483002021635509, -0.466553967085785, 0.434217976756762, -0.387095214016348,
+ 0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532
+};
+#endif
+
#if CONFIG_HYBRIDTRANSFORM
void vp8_iht4x4llm_c(short *input, short *output, int pitch, TX_TYPE tx_type) {
int i, j, k;
float bufa[16], bufb[16]; // buffers are for floating-point test purpose
- // the implementation could be simplified in conjunction with integer transform
+ // the implementation could be simplified in
+ // conjunction with integer transform
short *ip = input;
short *op = output;
int shortpitch = pitch >> 1;
@@ -158,6 +201,113 @@
}
#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+void vp8_iht8x8llm_c(short *input, short *output, int pitch, TX_TYPE tx_type) {
+ int i, j, k;
+  float bufa[64], bufb[64]; // buffers are for floating-point testing purposes
+ // the implementation could be simplified in
+ // conjunction with integer transform
+ short *ip = input;
+ short *op = output;
+ int shortpitch = pitch >> 1;
+
+ float *pfa = &bufa[0];
+ float *pfb = &bufb[0];
+
+ // pointers to vertical and horizontal transforms
+ float *ptv, *pth;
+
+ // load and convert residual array into floating-point
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfa[i] = (float)ip[i];
+ }
+ pfa += 8;
+ ip += 8;
+ }
+
+ // vertical transformation
+ pfa = &bufa[0];
+ pfb = &bufb[0];
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case ADST_DCT :
+ ptv = &iadst_8[0];
+ break;
+
+ default :
+ ptv = &idct_8[0];
+ break;
+ }
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfb[i] = 0 ;
+ for(k = 0; k < 8; k++) {
+ pfb[i] += ptv[k] * pfa[(k<<3)];
+ }
+ pfa += 1;
+ }
+
+ pfb += 8;
+ ptv += 8;
+ pfa = &bufa[0];
+ }
+
+ // horizontal transformation
+ pfa = &bufa[0];
+ pfb = &bufb[0];
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case DCT_ADST :
+ pth = &iadst_8[0];
+ break;
+
+ default :
+ pth = &idct_8[0];
+ break;
+ }
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfa[i] = 0;
+ for(k = 0; k < 8; k++) {
+ pfa[i] += pfb[k] * pth[k];
+ }
+ pth += 8;
+ }
+
+ pfa += 8;
+ pfb += 8;
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case DCT_ADST :
+ pth = &iadst_8[0];
+ break;
+
+ default :
+ pth = &idct_8[0];
+ break;
+ }
+ }
+
+ // convert to short integer format and load BLOCKD buffer
+ op = output;
+ pfa = &bufa[0];
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ op[i] = (pfa[i] > 0 ) ? (short)( pfa[i] / 8 + 0.49) :
+ -(short)( - pfa[i] / 8 + 0.49);
+ }
+ op += shortpitch;
+ pfa += 8;
+ }
+}
+#endif
void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) {
int i;
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index 89d437b..a36347d 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -56,13 +56,9 @@
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
MV_CONTEXT mvc[2];
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP mvc_hp[2];
-#endif
MV_CONTEXT pre_mvc[2];
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP pre_mvc_hp[2];
-#endif
vp8_prob pre_bmode_prob [VP8_BINTRAMODES - 1];
vp8_prob pre_ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob pre_uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -93,9 +89,7 @@
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
unsigned int MVcount [2] [MVvals];
-#if CONFIG_HIGH_PRECISION_MV
unsigned int MVcount_hp [2] [MVvals_hp];
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_prob switchable_interp_prob[VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1];
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 0093044..5a11f64 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -224,7 +224,6 @@
} while (++i < 2);
}
-#if CONFIG_HIGH_PRECISION_MV
static int read_mvcomponent_hp(vp8_reader *r, const MV_CONTEXT_HP *mvc) {
const vp8_prob *const p = (const vp8_prob *) mvc;
int x = 0;
@@ -281,7 +280,6 @@
} while (++p < pstop);
} while (++i < 2);
}
-#endif /* CONFIG_HIGH_PRECISION_MV */
// Read the referncence frame
static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
@@ -447,10 +445,8 @@
VP8_COMMON *const cm = & pbi->common;
vp8_reader *const bc = & pbi->bc;
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
MACROBLOCKD *const xd = & pbi->mb;
-#endif
vpx_memset(cm->mbskip_pred_probs, 0, sizeof(cm->mbskip_pred_probs));
if (pbi->common.mb_no_coeff_skip) {
@@ -495,11 +491,9 @@
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv)
read_mvcontexts_hp(bc, mvc_hp);
else
-#endif
read_mvcontexts(bc, mvc);
}
}
@@ -563,9 +557,7 @@
VP8_COMMON *const cm = & pbi->common;
vp8_reader *const bc = & pbi->bc;
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
-#endif
const int mis = pbi->common.mode_info_stride;
MACROBLOCKD *const xd = & pbi->mb;
@@ -720,14 +712,11 @@
switch (blockmode) {
case NEW4X4:
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &blockmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (blockmv.as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (blockmv.as_mv.col)]++;
- } else
-#endif
- {
+ } else {
read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (blockmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (blockmv.as_mv.col >> 1)]++;
@@ -736,14 +725,11 @@
blockmv.as_mv.col += best_mv.as_mv.col;
if (mbmi->second_ref_frame) {
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (secondmv.as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (secondmv.as_mv.col)]++;
- } else
-#endif
- {
+ } else {
read_mv(bc, &secondmv.as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (secondmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (secondmv.as_mv.col >> 1)]++;
@@ -852,14 +838,11 @@
break;
case NEWMV:
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mv->as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mv->as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (mv->as_mv.col)]++;
- } else
-#endif
- {
+ } else {
read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (mv->as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mv->as_mv.col >> 1)]++;
@@ -878,15 +861,12 @@
mb_to_top_edge,
mb_to_bottom_edge);
if (mbmi->second_ref_frame) {
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mbmi->second_mv.as_mv,
(const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->second_mv.as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->second_mv.as_mv.col)]++;
- } else
-#endif
- {
+ } else {
read_mv(bc, &mbmi->second_mv.as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (mbmi->second_mv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mbmi->second_mv.as_mv.col >> 1)]++;
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index ebc2719..4755842 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -46,7 +46,6 @@
#define COEFCOUNT_TESTING
-
static int merge_index(int v, int n, int modulus) {
int max1 = (n - 1 - modulus / 2) / modulus + 1;
if (v < max1) v = v * modulus + modulus / 2;
@@ -260,7 +259,7 @@
}
}
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
}
@@ -336,29 +335,8 @@
for (i = 0; i < 16; i++) {
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
- if(active_ht) {
- switch(b_mode) {
- case B_TM_PRED :
- case B_RD_PRED :
- b->bmi.as_mode.tx_type = ADST_ADST;
- break;
-
- case B_VE_PRED :
- case B_VR_PRED :
- b->bmi.as_mode.tx_type = ADST_DCT;
- break ;
-
- case B_HE_PRED :
- case B_HD_PRED :
- case B_HU_PRED :
- b->bmi.as_mode.tx_type = DCT_ADST;
- break;
-
- default :
- b->bmi.as_mode.tx_type = DCT_DCT;
- break;
- }
- }
+ if(active_ht)
+ txfm_map(b, b_mode);
} // loop over 4x4 blocks
}
#endif
@@ -392,7 +370,7 @@
int i8x8mode;
BLOCKD *b;
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
int idx = (ib & 0x02) ? (ib + 2) : ib;
short *q = xd->block[idx].qcoeff;
@@ -410,8 +388,11 @@
RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)
(b, i8x8mode, b->predictor);
-#if CONFIG_HTRANS8X8
- vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ txfm_map(b, pred_mode_conv(i8x8mode));
+ vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
+ q, dq, pre, dst, 16, stride);
+ // vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
q += 64;
#else
for (j = 0; j < 4; j++) {
@@ -777,10 +758,8 @@
if (pc->frame_type == KEY_FRAME) {
/* Various keyframe initializations */
vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
-#if CONFIG_HIGH_PRECISION_MV
vpx_memcpy(pc->fc.mvc_hp, vp8_default_mv_context_hp,
sizeof(vp8_default_mv_context_hp));
-#endif
vp8_init_mbmode_probs(pc);
@@ -841,49 +820,6 @@
}
-static void read_coef_probs3(VP8D_COMP *pbi) {
- const vp8_prob grpupd = 216;
- int i, j, k, l;
- vp8_reader *const bc = & pbi->bc;
- VP8_COMMON *const pc = & pbi->common;
- for (i = 0; i < BLOCK_TYPES; i++)
- for (l = 0; l < ENTROPY_NODES; l++) {
- if (vp8_read(bc, grpupd)) {
- // printf("Decoding %d\n", l);
- for (j = !i; j < COEF_BANDS; j++)
- for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
- if (k >= 3 && ((i == 0 && j == 1) ||
- (i > 0 && j == 0)))
- continue;
- {
- vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
- int u = vp8_read(bc, COEF_UPDATE_PROB);
- if (u) *p = read_prob_diff_update(bc, *p);
- }
- }
- }
- }
-
- if (pbi->common.txfm_mode == ALLOW_8X8) {
- for (i = 0; i < BLOCK_TYPES_8X8; i++)
- for (l = 0; l < ENTROPY_NODES; l++) {
- if (vp8_read(bc, grpupd)) {
- for (j = !i; j < COEF_BANDS; j++)
- for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
- if (k >= 3 && ((i == 0 && j == 1) ||
- (i > 0 && j == 0)))
- continue;
- {
- vp8_prob *const p = pc->fc.coef_probs_8x8 [i][j][k] + l;
- int u = vp8_read(bc, COEF_UPDATE_PROB_8X8);
- if (u) *p = read_prob_diff_update(bc, *p);
- }
- }
- }
- }
- }
-}
-
static void read_coef_probs2(VP8D_COMP *pbi) {
const vp8_prob grpupd = 192;
int i, j, k, l;
@@ -1314,10 +1250,8 @@
pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc);
pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc);
-#if CONFIG_HIGH_PRECISION_MV
/* Is high precision mv allowed */
xd->allow_high_precision_mv = (unsigned char)vp8_read_bit(bc);
-#endif
// Read the type of subpel filter to use
#if CONFIG_SWITCHABLE_INTERP
if (vp8_read_bit(bc)) {
@@ -1362,9 +1296,7 @@
vp8_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob);
vp8_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob);
vp8_copy(pbi->common.fc.pre_mvc, pbi->common.fc.mvc);
-#if CONFIG_HIGH_PRECISION_MV
vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
-#endif
vp8_zero(pbi->common.fc.coef_counts);
vp8_zero(pbi->common.fc.coef_counts_8x8);
#if CONFIG_TX16X16
@@ -1377,15 +1309,11 @@
vp8_zero(pbi->common.fc.sub_mv_ref_counts);
vp8_zero(pbi->common.fc.mbsplit_counts);
vp8_zero(pbi->common.fc.MVcount);
-#if CONFIG_HIGH_PRECISION_MV
vp8_zero(pbi->common.fc.MVcount_hp);
-#endif
vp8_zero(pbi->common.fc.mv_ref_ct);
vp8_zero(pbi->common.fc.mv_ref_ct_a);
#if COEFUPDATETYPE == 2
read_coef_probs2(pbi);
-#elif COEFUPDATETYPE == 3
- read_coef_probs3(pbi);
#else
read_coef_probs(pbi);
#endif
diff --git a/vp8/decoder/dequantize.c b/vp8/decoder/dequantize.c
index 6554091..bf44fd6 100644
--- a/vp8/decoder/dequantize.c
+++ b/vp8/decoder/dequantize.c
@@ -79,6 +79,51 @@
}
#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
+ unsigned char *pred, unsigned char *dest,
+ int pitch, int stride) {
+ short output[64];
+ short *diff_ptr = output;
+ int b, r, c;
+ int i;
+ unsigned char *origdest = dest;
+ unsigned char *origpred = pred;
+
+ input[0] = dq[0] * input[0];
+ for (i = 1; i < 64; i++) {
+ input[i] = dq[1] * input[i];
+ }
+
+ vp8_iht8x8llm_c(input, output, 16, tx_type);
+
+ vpx_memset(input, 0, 128);
+
+ for (b = 0; b < 4; b++) {
+ for (r = 0; r < 4; r++) {
+ for (c = 0; c < 4; c++) {
+ int a = diff_ptr[c] + pred[c];
+
+ if (a < 0)
+ a = 0;
+
+ if (a > 255)
+ a = 255;
+
+ dest[c] = (unsigned char) a;
+ }
+
+ dest += stride;
+ diff_ptr += 8;
+ pred += pitch;
+ }
+ diff_ptr = output + (b + 1) / 2 * 4 * 8 + (b + 1) % 2 * 4;
+ dest = origdest + (b + 1) / 2 * 4 * stride + (b + 1) % 2 * 4;
+ pred = origpred + (b + 1) / 2 * 4 * pitch + (b + 1) % 2 * 4;
+ }
+}
+#endif
+
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
short output[16];
diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c
index c93b8e9..5f9768d 100644
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -473,7 +473,7 @@
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
int bufthred = (xd->mode_info_context->mbmi.mode == I8X8_PRED) ? 16 : 24;
if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV &&
@@ -506,7 +506,7 @@
else
seg_eob = 64;
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < bufthred ; i += 4) {
#else
for (i = 0; i < 24; i += 4) {
@@ -528,7 +528,7 @@
qcoeff_ptr += 64;
}
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
type = PLANE_TYPE_UV;
seg_eob = 16;
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 3305ca5..1896145 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -556,7 +556,6 @@
vp8_encode_motion_vector(w, &e, mvc);
}
-#if CONFIG_HIGH_PRECISION_MV
static void write_mv_hp
(
vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
@@ -567,7 +566,6 @@
vp8_encode_motion_vector_hp(w, &e, mvc);
}
-#endif
// This function writes the current macro block's segnment id to the bitstream
// It should only be called if a segment map update is indicated.
@@ -721,9 +719,7 @@
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
const MV_CONTEXT *mvc = pc->fc.mvc;
-#if CONFIG_HIGH_PRECISION_MV
const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
-#endif
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
MODE_INFO *prev_m;
@@ -799,11 +795,9 @@
update_mbintra_mode_probs(cpi);
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv)
vp8_write_mvprobs_hp(cpi);
else
-#endif
vp8_write_mvprobs(cpi);
mb_row = 0;
@@ -1007,23 +1001,17 @@
active_section = 5;
#endif
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &mi->mv.as_mv, &best_mv, mvc_hp);
- } else
-#endif
- {
+ } else {
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
}
if (mi->second_ref_frame) {
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &mi->second_mv.as_mv,
&best_second_mv, mvc_hp);
- } else
-#endif
- {
+ } else {
write_mv(w, &mi->second_mv.as_mv,
&best_second_mv, mvc);
}
@@ -1068,25 +1056,19 @@
#ifdef ENTROPY_STATS
active_section = 11;
#endif
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT_HP *) mvc_hp);
- } else
-#endif
- {
+ } else {
write_mv(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT *) mvc);
}
if (mi->second_ref_frame) {
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
- } else
-#endif
- {
+ } else {
write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT *) mvc);
}
@@ -1365,177 +1347,6 @@
#endif
}
-static void update_coef_probs3(VP8_COMP *cpi) {
- const vp8_prob grpupd = 216;
- int i, j, k, t;
- vp8_writer *const w = & cpi->bc;
- int update[2];
- int savings;
- int bestupdndx[2 * ENTROPY_NODES];
-
- vp8_clear_system_state(); // __asm emms;
- // Build the cofficient contexts based on counts collected in encode loop
- build_coeff_contexts(cpi);
-
- i = 0;
- for (i = 0; i < BLOCK_TYPES; ++i) {
- for (t = 0; t < ENTROPY_NODES; ++t) {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (j = !i; j < COEF_BANDS; ++j) {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
- if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
-#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
- if (u)
- savings += s;
-#endif
- // printf(" %d %d %d: %d\n", i, j, k, u);
- update[u]++;
- }
- }
- if (update[1] == 0 || savings < 0) {
- vp8_write(w, 0, grpupd);
- continue;
- }
- vp8_write(w, 1, grpupd);
- for (j = !i; j < COEF_BANDS; ++j) {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
-
- if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
-#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
-#endif
- // printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
- vp8_write(w, u, upd);
-#ifdef ENTROPY_STATS
- if (!cpi->dummy_packing)
- ++ tree_update_hist [i][j][k][t] [u];
-#endif
- if (u) {
- /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
-
- }
- }
- }
- }
-
- if (cpi->common.txfm_mode != ALLOW_8X8) return;
-
- for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
- for (t = 0; t < ENTROPY_NODES; ++t) {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (j = !i; j < COEF_BANDS; ++j) {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
-
- if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
-#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
- if (u)
- savings += s;
-#endif
- update[u]++;
- }
- }
- if (update[1] == 0 || savings < 0) {
- vp8_write(w, 0, grpupd);
- continue;
- }
- vp8_write(w, 1, grpupd);
- for (j = !i; j < COEF_BANDS; ++j) {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
-
- if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
-#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
-#endif
- vp8_write(w, u, upd);
-#ifdef ENTROPY_STATS
- if (!cpi->dummy_packing)
- ++ tree_update_hist_8x8 [i][j][k][t] [u];
-#endif
- if (u) {
- /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
- }
- }
- }
- }
-}
-
static void update_coef_probs2(VP8_COMP *cpi) {
const vp8_prob grpupd = 192;
int i, j, k, t;
@@ -2334,10 +2145,8 @@
vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
-#if CONFIG_HIGH_PRECISION_MV
// Signal whether to allow high MV precision
vp8_write_bit(bc, (xd->allow_high_precision_mv) ? 1 : 0);
-#endif
#if CONFIG_SWITCHABLE_INTERP
if (pc->mcomp_filter_type == SWITCHABLE) {
/* Check to see if only one of the filters is actually used */
@@ -2393,17 +2202,13 @@
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
-#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
-#endif
vp8_zero(cpi->sub_mv_ref_count);
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
vp8_zero(cpi->common.fc.mv_ref_ct_a)
#if COEFUPDATETYPE == 2
update_coef_probs2(cpi);
-#elif COEFUPDATETYPE == 3
- update_coef_probs3(cpi);
#else
update_coef_probs(cpi);
#endif
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 0019d5e..2c1e29c 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -119,12 +119,11 @@
int *mvcost[2];
int mvsadcosts[2][MVfpvals + 1];
int *mvsadcost[2];
-#if CONFIG_HIGH_PRECISION_MV
int mvcosts_hp[2][MVvals_hp + 1];
int *mvcost_hp[2];
int mvsadcosts_hp[2][MVfpvals_hp + 1];
int *mvsadcost_hp[2];
-#endif
+
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
int bmode_costs[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
@@ -156,9 +155,7 @@
unsigned char *active_ptr;
MV_CONTEXT *mvc;
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP *mvc_hp;
-#endif
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index ba2a692..ad52585 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -17,6 +17,8 @@
#include "vp8/common/blockd.h"
+// TODO: these transforms can be converted into integer forms to reduce
+// the complexity
float dct_4[16] = {
0.500000000000000, 0.500000000000000, 0.500000000000000, 0.500000000000000,
0.653281482438188, 0.270598050073099, -0.270598050073099, -0.653281482438188,
@@ -32,6 +34,45 @@
};
#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+float dct_8[64] = {
+ 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
+ 0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
+ 0.490392640201615, 0.415734806151273, 0.277785116509801, 0.097545161008064,
+ -0.097545161008064, -0.277785116509801, -0.415734806151273, -0.490392640201615,
+ 0.461939766255643, 0.191341716182545, -0.191341716182545, -0.461939766255643,
+ -0.461939766255643, -0.191341716182545, 0.191341716182545, 0.461939766255643,
+ 0.415734806151273, -0.097545161008064, -0.490392640201615, -0.277785116509801,
+ 0.277785116509801, 0.490392640201615, 0.097545161008064, -0.415734806151273,
+ 0.353553390593274, -0.353553390593274, -0.353553390593274, 0.353553390593274,
+ 0.353553390593274, -0.353553390593274, -0.353553390593274, 0.353553390593274,
+ 0.277785116509801, -0.490392640201615, 0.097545161008064, 0.415734806151273,
+ -0.415734806151273, -0.097545161008064, 0.490392640201615, -0.277785116509801,
+ 0.191341716182545, -0.461939766255643, 0.461939766255643, -0.191341716182545,
+ -0.191341716182545, 0.461939766255643, -0.461939766255643, 0.191341716182545,
+ 0.097545161008064, -0.277785116509801, 0.415734806151273, -0.490392640201615,
+ 0.490392640201615, -0.415734806151273, 0.277785116509801, -0.097545161008064
+};
+
+float adst_8[64] = {
+ 0.089131608307533, 0.175227946595735, 0.255357107325376, 0.326790388032145,
+ 0.387095214016349, 0.434217976756762, 0.466553967085785, 0.483002021635509,
+ 0.255357107325376, 0.434217976756762, 0.483002021635509, 0.387095214016349,
+ 0.175227946595735, -0.089131608307533, -0.326790388032145, -0.466553967085785,
+ 0.387095214016349, 0.466553967085785, 0.175227946595735, -0.255357107325376,
+ -0.483002021635509, -0.326790388032145, 0.089131608307533, 0.434217976756762,
+ 0.466553967085785, 0.255357107325376, -0.326790388032145, -0.434217976756762,
+ 0.089131608307533, 0.483002021635509, 0.175227946595735, -0.387095214016348,
+ 0.483002021635509, -0.089131608307533, -0.466553967085785, 0.175227946595735,
+ 0.434217976756762, -0.255357107325376, -0.387095214016348, 0.326790388032145,
+ 0.434217976756762, -0.387095214016348, -0.089131608307533, 0.466553967085786,
+ -0.326790388032145, -0.175227946595735, 0.483002021635509, -0.255357107325375,
+ 0.326790388032145, -0.483002021635509, 0.387095214016349, -0.089131608307534,
+ -0.255357107325377, 0.466553967085785, -0.434217976756762, 0.175227946595736,
+ 0.175227946595735, -0.326790388032145, 0.434217976756762, -0.483002021635509,
+ 0.466553967085785, -0.387095214016348, 0.255357107325376, -0.089131608307532
+};
+#endif
static const int xC1S7 = 16069;
static const int xC2S6 = 15137;
@@ -394,6 +435,112 @@
}
#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+void vp8_fht8x8_c(short *input, short *output, int pitch, TX_TYPE tx_type) {
+ int i, j, k;
+ float bufa[64], bufb[64]; // buffers are for floating-point test purpose
+ // the implementation could be simplified in
+ // conjunction with integer transform
+ short *ip = input;
+ short *op = output;
+
+ float *pfa = &bufa[0];
+ float *pfb = &bufb[0];
+
+ // pointers to vertical and horizontal transforms
+ float *ptv, *pth;
+
+ // load and convert residual array into floating-point
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfa[i] = (float)ip[i];
+ }
+ pfa += 8;
+ ip += pitch / 2;
+ }
+
+ // vertical transformation
+ pfa = &bufa[0];
+ pfb = &bufb[0];
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case ADST_DCT :
+ ptv = &adst_8[0];
+ break;
+
+ default :
+ ptv = &dct_8[0];
+ break;
+ }
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfb[i] = 0;
+ for(k = 0; k < 8; k++) {
+ pfb[i] += ptv[k] * pfa[(k<<3)];
+ }
+ pfa += 1;
+ }
+ pfb += 8;
+ ptv += 8;
+ pfa = &bufa[0];
+ }
+
+ // horizontal transformation
+ pfa = &bufa[0];
+ pfb = &bufb[0];
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case DCT_ADST :
+ pth = &adst_8[0];
+ break;
+
+ default :
+ pth = &dct_8[0];
+ break;
+ }
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ pfa[i] = 0;
+ for(k = 0; k < 8; k++) {
+ pfa[i] += pfb[k] * pth[k];
+ }
+ pth += 8;
+ }
+
+ pfa += 8;
+ pfb += 8;
+
+ switch(tx_type) {
+ case ADST_ADST :
+ case DCT_ADST :
+ pth = &adst_8[0];
+ break;
+
+ default :
+ pth = &dct_8[0];
+ break;
+ }
+ }
+
+ // convert to short integer format and load BLOCKD buffer
+ op = output ;
+ pfa = &bufa[0] ;
+
+ for(j = 0; j < 8; j++) {
+ for(i = 0; i < 8; i++) {
+ op[i] = (pfa[i] > 0 ) ? (short)( 8 * pfa[i] + 0.49) :
+ -(short)(- 8 * pfa[i] + 0.49);
+ }
+ op += 8;
+ pfa += 8;
+ }
+}
+#endif
+
void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
int i;
int a1, b1, c1, d1;
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 23eed6d..e1d0bf4 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1054,9 +1054,7 @@
// vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
-#if CONFIG_HIGH_PRECISION_MV
x->mvc_hp = cm->fc.mvc_hp;
-#endif
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
@@ -1127,9 +1125,7 @@
xd->prev_mode_info_context = cm->prev_mi;
vp8_zero(cpi->MVcount);
-#if CONFIG_HIGH_PRECISION_MV
vp8_zero(cpi->MVcount_hp);
-#endif
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->coef_counts_8x8);
#if CONFIG_TX16X16
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 325efeb..964046d 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -90,28 +90,7 @@
#if CONFIG_HYBRIDTRANSFORM
if(active_ht) {
b->bmi.as_mode.test = b->bmi.as_mode.first;
- switch(b->bmi.as_mode.first) {
- // case B_DC_PRED :
- case B_TM_PRED :
- case B_RD_PRED :
- b->bmi.as_mode.tx_type = ADST_ADST;
- break;
-
- case B_VE_PRED :
- case B_VR_PRED :
- b->bmi.as_mode.tx_type = ADST_DCT;
- break;
-
- case B_HE_PRED :
- case B_HD_PRED :
- case B_HU_PRED :
- b->bmi.as_mode.tx_type = DCT_ADST;
- break;
-
- default :
- b->bmi.as_mode.tx_type = DCT_DCT;
- break;
- }
+ txfm_map(b, b->bmi.as_mode.first);
vp8_fht4x4_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type);
vp8_ht_quantize_b(be, b);
@@ -329,16 +308,25 @@
}
#endif
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
{
MACROBLOCKD *xd = &x->e_mbd;
int idx = (ib & 0x02) ? (ib + 2) : ib;
// generate residual blocks
vp8_subtract_4b_c(be, b, 16);
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
+
+ txfm_map(b, pred_mode_conv(b->bmi.as_mode.first));
+
+ vp8_fht8x8_c(be->src_diff, (x->block + idx)->coeff, 32,
+ b->bmi.as_mode.tx_type);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
- vp8_short_idct8x8_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
+ vp8_iht8x8llm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
+ b->bmi.as_mode.tx_type);
+
+// x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
+// x->quantize_b_8x8(x->block + idx, xd->block + idx);
+// vp8_short_idct8x8_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
// reconstruct submacroblock
for (i = 0; i < 4; i++) {
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index e2643f0..e5426f6 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -20,11 +20,6 @@
extern unsigned int active_section;
#endif
-// #define DEBUG_ENC_MV
-#ifdef DEBUG_ENC_MV
-int enc_mvcount = 0;
-#endif
-
static void encode_mvcomponent(
vp8_writer *const w,
const int v,
@@ -61,47 +56,10 @@
vp8_write(w, v < 0, p [MVPsign]);
}
-#if 0
-static int max_mv_r = 0;
-static int max_mv_c = 0;
-#endif
+
void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc) {
-
-#if 0
- {
- if (abs(mv->row >> 1) > max_mv_r) {
- FILE *f = fopen("maxmv.stt", "a");
- max_mv_r = abs(mv->row >> 1);
- fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
-
- if ((abs(mv->row) / 2) != max_mv_r)
- fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
-
- fclose(f);
- }
-
- if (abs(mv->col >> 1) > max_mv_c) {
- FILE *f = fopen("maxmv.stt", "a");
- fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
- max_mv_c = abs(mv->col >> 1);
- fclose(f);
- }
- }
-#endif
encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
-#ifdef DEBUG_ENC_MV
- {
- int i;
- printf("%d (np): %d %d\n", enc_mvcount++,
- (mv->row >> 1) << 1, (mv->col >> 1) << 1);
- // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]);
- // printf("\n");
- // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[1])->prob[i]);
- // printf("\n");
- fflush(stdout);
- }
-#endif
}
@@ -282,43 +240,6 @@
} while (++j <= mv_max);
}
- /*
- {
- int j = -mv_max;
- do
- {
-
- const int c = events [mv_max + j];
- int a = j;
-
- if( j < 0)
- {
- sign_ct [1] += c;
- a = -j;
- }
- else if( j)
- sign_ct [0] += c;
-
- if( a < mvnum_short)
- {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- }
- else
- {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- // bit 3 not always encoded.
-
- do
- bit_ct [k] [(a >> k) & 1] += c;
- while( --k >= 0);
- }
- } while( ++j <= mv_max);
- }
- */
-
calc_prob(Pnew + mvpis_short, is_short_ct);
calc_prob(Pnew + MVPsign, sign_ct);
@@ -401,7 +322,6 @@
#endif
}
-#if CONFIG_HIGH_PRECISION_MV
static void encode_mvcomponent_hp(
vp8_writer *const w,
@@ -441,47 +361,12 @@
vp8_write(w, v < 0, p [MVPsign_hp]);
}
-#if 0
-static int max_mv_r = 0;
-static int max_mv_c = 0;
-#endif
+
void vp8_encode_motion_vector_hp(vp8_writer *w, const MV *mv,
const MV_CONTEXT_HP *mvc) {
-#if 0
- {
- if (abs(mv->row >> 1) > max_mv_r) {
- FILE *f = fopen("maxmv.stt", "a");
- max_mv_r = abs(mv->row >> 1);
- fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
-
- if ((abs(mv->row) / 2) != max_mv_r)
- fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
-
- fclose(f);
- }
-
- if (abs(mv->col >> 1) > max_mv_c) {
- FILE *f = fopen("maxmv.stt", "a");
- fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
- max_mv_c = abs(mv->col >> 1);
- fclose(f);
- }
- }
-#endif
encode_mvcomponent_hp(w, mv->row, &mvc[0]);
encode_mvcomponent_hp(w, mv->col, &mvc[1]);
-#ifdef DEBUG_ENC_MV
- {
- int i;
- printf("%d (hp): %d %d\n", enc_mvcount++, mv->row, mv->col);
- // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]);
- // printf("\n");
- // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[1])->prob[i]);
- // printf("\n");
- fflush(stdout);
- }
-#endif
}
@@ -716,4 +601,3 @@
active_section = 5;
#endif
}
-#endif /* CONFIG_HIGH_PRECISION_MV */
diff --git a/vp8/encoder/encodemv.h b/vp8/encoder/encodemv.h
index 09b0935..7e33007 100644
--- a/vp8/encoder/encodemv.h
+++ b/vp8/encoder/encodemv.h
@@ -17,10 +17,8 @@
void vp8_write_mvprobs(VP8_COMP *);
void vp8_encode_motion_vector(vp8_writer *, const MV *, const MV_CONTEXT *);
void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]);
-#if CONFIG_HIGH_PRECISION_MV
void vp8_write_mvprobs_hp(VP8_COMP *);
void vp8_encode_motion_vector_hp(vp8_writer *, const MV *, const MV_CONTEXT_HP *);
void vp8_build_component_cost_table_hp(int *mvcost[2], const MV_CONTEXT_HP *mvc, int mvc_flag[2]);
-#endif /* CONFIG_HIGH_PRECISION_MV */
#endif
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 6715c80..1e5d4ef 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -38,11 +38,7 @@
#define IF_RTCD(x) NULL
#endif
-#if CONFIG_HIGH_PRECISION_MV
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-#else
-#define XMVCOST (x->mvcost)
-#endif
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
@@ -499,10 +495,8 @@
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
-#if CONFIG_HIGH_PRECISION_MV
vpx_memcpy(cm->fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
-#endif
}
// for each macroblock row in image
diff --git a/vp8/encoder/mbgraph.c b/vp8/encoder/mbgraph.c
index 4cd3ea2..d4c14eb 100644
--- a/vp8/encoder/mbgraph.c
+++ b/vp8/encoder/mbgraph.c
@@ -34,11 +34,10 @@
static int dummy_cost[2 * mv_max + 1];
int *mvcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
int *mvsadcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
-#if CONFIG_HIGH_PRECISION_MV
static int dummy_cost_hp[2 * mv_max_hp + 1];
int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
-#endif
+
int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.col & 7) ? 1 : 0);
int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.row & 7) ? 1 : 0);
int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
@@ -72,32 +71,27 @@
ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
/*cpi->sf.search_method == HEX*/
- best_err = vp8_hex_search(x, b, d,
- &ref_full, dst_mv,
- step_param,
- x->errorperbit,
- &v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost, xd->allow_high_precision_mv ? mvcost_hp : mvcost,
-#else
- mvsadcost, mvcost,
-#endif
- ref_mv);
+ best_err = vp8_hex_search(
+ x, b, d,
+ &ref_full, dst_mv,
+ step_param,
+ x->errorperbit,
+ &v_fn_ptr,
+ xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost,
+ xd->allow_high_precision_mv ? mvcost_hp : mvcost,
+ ref_mv);
// Try sub-pixel MC
// if (bestsme > error_thresh && bestsme < INT_MAX)
{
int distortion;
unsigned int sse;
- best_err = cpi->find_fractional_mv_step(x, b, d,
- dst_mv, ref_mv,
- x->errorperbit, &v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- xd->allow_high_precision_mv ? mvcost_hp : mvcost,
-#else
- mvcost,
-#endif
- & distortion, &sse);
+ best_err = cpi->find_fractional_mv_step(
+ x, b, d,
+ dst_mv, ref_mv,
+ x->errorperbit, &v_fn_ptr,
+ xd->allow_high_precision_mv ? mvcost_hp : mvcost,
+ & distortion, &sse);
}
#if CONFIG_PRED_FILTER
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 891b479..439d415 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -22,26 +22,21 @@
static int mv_mode_cts [4] [2];
#endif
-#if CONFIG_HIGH_PRECISION_MV
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp) {
- // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
- // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
- // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
- // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)]) * Weight) >> 7;
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+ int Weight, int ishp) {
+ // MV costing is based on the distribution of vectors in the previous frame
+ // and as such will tend to over state the cost of vectors. In addition
+ // coding a new vector can have a knock on effect on the cost of subsequent
+ // vectors and the quality of prediction from NEAR and NEAREST for subsequent
+ // blocks. The "Weight" parameter allows, to a limited extent, for some
+ // account to be taken of these factors.
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
+ mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)])
+ * Weight) >> 7;
}
-#else
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
- // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
- // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
- // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
- // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
-}
-#endif
-#if CONFIG_HIGH_PRECISION_MV
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit, int ishp) {
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+ int error_per_bit, int ishp) {
// Ignore costing if mvcost is NULL
if (mvcost)
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
@@ -49,19 +44,10 @@
* error_per_bit + 128) >> 8;
return 0;
}
-#else
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit) {
- // Ignore costing if mvcost is NULL
- if (mvcost)
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
- mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
- * error_per_bit + 128) >> 8;
- return 0;
-}
-#endif
-static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit) {
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
+ int error_per_bit) {
// Calculate sad error cost on full pixel basis.
// Ignore costing if mvcost is NULL
if (mvsadcost)
@@ -209,7 +195,6 @@
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
-#if CONFIG_HIGH_PRECISION_MV
#define PREHP(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset))) // pointer to predictor base of a motionvector
#if CONFIG_SIXTEENTH_SUBPEL_UV
#define SPHP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
@@ -219,9 +204,6 @@
#define DISTHP(r,c) vfp->svf( PREHP(r,c), y_stride, SPHP(c),SPHP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
#define ERRHP(r,c) (MVC(r,c)+DISTHP(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = ((xd->allow_high_precision_mv)?DISTHP(r,c):DIST(r,c)); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
-#else
-#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
-#endif /* CONFIG_HIGH_PRECISION_MV */
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
@@ -243,9 +225,7 @@
unsigned int whichdir;
unsigned int halfiters = 4;
unsigned int quarteriters = 4;
-#if CONFIG_HIGH_PRECISION_MV
unsigned int eighthiters = 4;
-#endif
int thismse;
int maxc, minc, maxr, minr;
int y_stride;
@@ -276,7 +256,6 @@
#endif
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
rr = ref_mv->as_mv.row;
rc = ref_mv->as_mv.col;
@@ -287,9 +266,7 @@
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
- } else
-#endif
- {
+ } else {
rr = ref_mv->as_mv.row >> 1;
rc = ref_mv->as_mv.col >> 1;
br = bestmv->as_mv.row << 2;
@@ -314,13 +291,11 @@
// calculate central point error
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = besterr;
-#if CONFIG_HIGH_PRECISION_MV
- besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
-#endif
+ besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
+ // TODO: Each subsequent iteration checks at least one point in
+ // common with the last iteration could be 2 ( if diag selected)
while (--halfiters) {
// 1/2 pel
CHECK_BETTER(left, tr, tc - hstep);
@@ -353,8 +328,8 @@
tc = bc;
}
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
- // 1/4 pel
+ // TODO: Each subsequent iteration checks at least one point in common with
+ // the last iteration could be 2 ( if diag selected) 1/4 pel
hstep >>= 1;
while (--quarteriters) {
CHECK_BETTER(left, tr, tc - hstep);
@@ -387,7 +362,6 @@
tc = bc;
}
-#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
hstep >>= 1;
while (--eighthiters) {
@@ -421,14 +395,10 @@
tc = bc;
}
}
-#endif
-#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
bestmv->as_mv.row = br;
bestmv->as_mv.col = bc;
- } else
-#endif /* CONFIG_HIGH_PRECISION_MV */
- {
+ } else {
bestmv->as_mv.row = br << 1;
bestmv->as_mv.col = bc << 1;
}
@@ -449,12 +419,10 @@
#undef MIN
#undef MAX
-#if CONFIG_HIGH_PRECISION_MV
#undef PREHP
#undef DPHP
#undef DISTHP
#undef ERRHP
-#endif
#if CONFIG_SIXTEENTH_SUBPEL_UV
#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
@@ -470,10 +438,8 @@
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
-#if CONFIG_HIGH_PRECISION_MV
int_mv orig_mv;
int yrow_movedback = 0, ycol_movedback = 0;
-#endif
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
@@ -499,28 +465,20 @@
bestmv->as_mv.row <<= 3;
bestmv->as_mv.col <<= 3;
startmv = *bestmv;
-#if CONFIG_HIGH_PRECISION_MV
orig_mv = *bestmv;
-#endif
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
-#if CONFIG_HIGH_PRECISION_MV
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
-#endif
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (left < bestmse) {
*bestmv = this_mv;
@@ -531,11 +489,8 @@
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (right < bestmse) {
*bestmv = this_mv;
@@ -548,11 +503,8 @@
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (up < bestmse) {
*bestmv = this_mv;
@@ -563,11 +515,8 @@
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (down < bestmse) {
*bestmv = this_mv;
@@ -607,11 +556,8 @@
break;
}
-#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -626,16 +572,12 @@
// time to check quarter pels.
if (bestmv->as_mv.row < startmv.as_mv.row) {
y -= y_stride;
-#if CONFIG_HIGH_PRECISION_MV
yrow_movedback = 1;
-#endif
}
if (bestmv->as_mv.col < startmv.as_mv.col) {
y--;
-#if CONFIG_HIGH_PRECISION_MV
ycol_movedback = 1;
-#endif
}
startmv = *bestmv;
@@ -647,17 +589,17 @@
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col = startmv.as_mv.col - 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
+ b->src_stride, &sse);
}
-#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (left < bestmse) {
*bestmv = this_mv;
@@ -667,12 +609,11 @@
}
this_mv.as_mv.col += 4;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (right < bestmse) {
*bestmv = this_mv;
@@ -686,17 +627,17 @@
if (startmv.as_mv.row & 7) {
this_mv.as_mv.row = startmv.as_mv.row - 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6),
+ z, b->src_stride, &sse);
}
-#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (up < bestmse) {
*bestmv = this_mv;
@@ -706,12 +647,10 @@
}
this_mv.as_mv.row += 4;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (down < bestmse) {
*bestmv = this_mv;
@@ -771,25 +710,26 @@
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
+ b->src_stride, &sse);
}
break;
case 3:
this_mv.as_mv.col += 2;
this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
break;
}
-#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -798,7 +738,6 @@
*sse1 = sse;
}
-#if CONFIG_HIGH_PRECISION_MV
if (!x->e_mbd.allow_high_precision_mv)
return bestmse;
@@ -820,13 +759,17 @@
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col = startmv.as_mv.col - 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
}
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (left < bestmse) {
*bestmv = this_mv;
@@ -877,7 +820,6 @@
*sse1 = sse;
}
-
// now check 1 more diagonal
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
@@ -951,8 +893,6 @@
*sse1 = sse;
}
-#endif /* CONFIG_HIGH_PRECISION_MV */
-
return bestmse;
}
@@ -976,7 +916,8 @@
MACROBLOCKD *xd = &x->e_mbd;
#if ARCH_X86 || ARCH_X86_64
- unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y0 = *(d->base_pre) + d->pre +
+ (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *y;
y_stride = 32;
@@ -984,7 +925,8 @@
vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y = *(d->base_pre) + d->pre +
+ (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
#endif
@@ -996,21 +938,15 @@
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
-#if CONFIG_HIGH_PRECISION_MV
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
-#endif
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (left < bestmse) {
*bestmv = this_mv;
@@ -1021,11 +957,8 @@
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (right < bestmse) {
*bestmv = this_mv;
@@ -1038,11 +971,8 @@
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (up < bestmse) {
*bestmv = this_mv;
@@ -1053,11 +983,8 @@
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
-#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (down < bestmse) {
*bestmv = this_mv;
@@ -1094,11 +1021,8 @@
break;
}
-#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
-#endif
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -1347,12 +1271,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1423,12 +1345,11 @@
if (bestsad == INT_MAX)
return INT_MAX;
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
}
int vp8_diamond_search_sadx4
@@ -1473,12 +1394,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1582,12 +1501,11 @@
if (bestsad == INT_MAX)
return INT_MAX;
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
}
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
@@ -1620,12 +1538,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1680,12 +1596,11 @@
this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
else
return INT_MAX;
}
@@ -1722,12 +1637,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1813,12 +1726,11 @@
this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
else
return INT_MAX;
}
@@ -1856,12 +1768,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1972,12 +1882,11 @@
this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
else
return INT_MAX;
}
@@ -2004,12 +1913,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2054,12 +1961,11 @@
this_mv.as_mv.col = ref_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
else
return INT_MAX;
}
@@ -2086,12 +1992,10 @@
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
-#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2166,12 +2070,11 @@
this_mv.as_mv.col = ref_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
-#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
-#endif
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
else
return INT_MAX;
}
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index d7fd137..5c7f527 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -25,11 +25,8 @@
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
-#if CONFIG_HIGH_PRECISION_MV
-extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp);
-#else
-extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
-#endif
+extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+ int Weight, int ishp);
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 73888cf..854e318 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -87,14 +87,12 @@
before trying each new filter */
#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
-#if CONFIG_HIGH_PRECISION_MV
#define ALTREF_HIGH_PRECISION_MV 1 /* whether to use high precision mv
for altref computation */
#define HIGH_PRECISION_MV_QTHRESH 200 /* Q threshold for use of high precision
mv. Choose a very high value for
now so that HIGH_PRECISION is always
chosen */
-#endif
#if CONFIG_INTERNAL_STATS
#include "math.h"
@@ -1523,9 +1521,7 @@
cm->refresh_entropy_probs = 1;
setup_features(cpi);
-#if CONFIG_HIGH_PRECISION_MV
cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
-#endif
{
int i;
@@ -1679,7 +1675,6 @@
} while (++i <= mvfp_max);
}
-#if CONFIG_HIGH_PRECISION_MV
static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
int i = 1;
@@ -1694,7 +1689,6 @@
mvsadcost [1][-i] = (int) z;
} while (++i <= mvfp_max_hp);
}
-#endif
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
@@ -1876,14 +1870,12 @@
cal_mvsadcosts(cpi->mb.mvsadcost);
-#if CONFIG_HIGH_PRECISION_MV
cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
-#endif
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
@@ -3153,10 +3145,8 @@
(Q < SHARP_FILTER_QTHRESH ? EIGHTTAP_SHARP : EIGHTTAP);
#endif
}
-#if CONFIG_HIGH_PRECISION_MV
/* TODO: Decide this more intelligently */
xd->allow_high_precision_mv = (Q < HIGH_PRECISION_MV_QTHRESH);
-#endif
}
#if CONFIG_POSTPROC
@@ -3652,9 +3642,7 @@
vp8_adapt_mode_probs(&cpi->common);
vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
-#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
-#endif
vp8_adapt_mv_probs(&cpi->common);
vp8_update_mode_context(&cpi->common);
}
@@ -4048,9 +4036,7 @@
cpi->source = NULL;
-#if CONFIG_HIGH_PRECISION_MV
cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
-#endif
// Should we code an alternate reference frame
if (cpi->oxcf.play_alternate &&
cpi->source_alt_ref_pending) {
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 455533e..6a0a902 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -62,10 +62,8 @@
typedef struct {
MV_CONTEXT mvc[2];
int mvcosts[2][MVvals + 1];
-#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP mvc_hp[2];
int mvcosts_hp[2][MVvals_hp + 1];
-#endif
#ifdef MODE_STATS
// Stats
@@ -539,9 +537,7 @@
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
-#if CONFIG_HIGH_PRECISION_MV
unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
-#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
// DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index b0f92c9..80fc16a 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -134,10 +134,8 @@
vp8_copy(cc->mvc, cm->fc.mvc);
vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
-#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
-#endif
vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
@@ -192,10 +190,8 @@
vp8_copy(cm->fc.mvc, cc->mvc);
vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
-#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
-#endif
vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@@ -253,14 +249,11 @@
int flag[2] = {1, 1};
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
}
-#if CONFIG_HIGH_PRECISION_MV
vpx_memcpy(cpi->common.fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
{
int flag[2] = {1, 1};
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
}
-#endif
-
cpi->common.txfm_mode = ALLOW_8X8;
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index a2cd265..3540611 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -54,11 +54,7 @@
extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
#endif
-#if CONFIG_HIGH_PRECISION_MV
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-#else
-#define XMVCOST (x->mvcost)
-#endif
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
@@ -454,7 +450,7 @@
return error;
}
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
int vp8_submb_error_c(short *coeff, short *dqcoeff) {
int i;
int error = 0;
@@ -985,28 +981,7 @@
#if CONFIG_HYBRIDTRANSFORM
if(active_ht) {
b->bmi.as_mode.test = mode;
- switch(mode) {
- // case B_DC_PRED :
- case B_TM_PRED :
- case B_RD_PRED :
- b->bmi.as_mode.tx_type = ADST_ADST;
- break;
-
- case B_VE_PRED :
- case B_VR_PRED :
- b->bmi.as_mode.tx_type = ADST_DCT;
- break;
-
- case B_HE_PRED :
- case B_HD_PRED :
- case B_HU_PRED :
- b->bmi.as_mode.tx_type = DCT_ADST;
- break;
-
- default :
- b->bmi.as_mode.tx_type = DCT_DCT;
- break;
- }
+ txfm_map(b, mode);
vp8_fht4x4_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type);
vp8_ht_quantize_b(be, b);
@@ -1267,7 +1242,7 @@
DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16 * 8);
DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16 * 4);
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
// perform transformation of dimension 8x8
// note the input and output index mapping
int idx = (ib & 0x02) ? (ib + 2) : ib;
@@ -1298,8 +1273,10 @@
vp8_subtract_4b_c(be, b, 16);
-#if CONFIG_HTRANS8X8
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ txfm_map(b, pred_mode_conv(mode));
+ vp8_fht8x8_c(be->src_diff, (x->block + idx)->coeff, 32, b->bmi.as_mode.tx_type);
+// x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
// compute quantization mse of 8x8 block
@@ -1376,7 +1353,7 @@
#endif
vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
*(a + vp8_block2above_8x8[idx]) = besta0;
*(a + vp8_block2above_8x8[idx] + 1) = besta1;
*(l + vp8_block2left_8x8 [idx]) = bestl0;
@@ -1730,19 +1707,13 @@
this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
}
-#if CONFIG_HIGH_PRECISION_MV
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
if (xd->mode_info_context->mbmi.second_ref_frame) {
- thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
- 102, xd->allow_high_precision_mv);
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv,
+ mvcost, 102,
+ xd->allow_high_precision_mv);
}
-#else
- thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
- if (xd->mode_info_context->mbmi.second_ref_frame) {
- thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
- }
-#endif
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
@@ -2527,7 +2498,6 @@
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
-#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
@@ -2540,7 +2510,6 @@
- second_best_ref_mv->as_mv.col)]++;
}
} else
-#endif
{
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
@@ -2556,7 +2525,6 @@
}
}
} else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
-#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
@@ -2569,7 +2537,6 @@
- second_best_ref_mv->as_mv.col)]++;
}
} else
-#endif
{
cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
@@ -3343,14 +3310,9 @@
mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
// Add the new motion vector cost to our rolling cost variable
-#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
-#else
- rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
- XMVCOST, 96);
-#endif
}
case NEARESTMV:
@@ -3502,7 +3464,6 @@
continue;
x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
-#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1],
XMVCOST, 96,
@@ -3511,14 +3472,6 @@
&frame_best_ref_mv[ref2],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
-#else
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
- &frame_best_ref_mv[ref1],
- XMVCOST, 96);
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
- &frame_best_ref_mv[ref2],
- XMVCOST, 96);
-#endif
break;
case ZEROMV:
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index dac18c6..105aa6a 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -504,7 +504,7 @@
#endif
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
static void tokenize1st_order_chroma
(
MACROBLOCKD *xd,
@@ -886,7 +886,7 @@
tokenize1st_order_ht(x, t, plane_type, cpi);
} else {
-#if CONFIG_HTRANS8X8
+#if CONFIG_HYBRIDTRANSFORM8X8
if (x->mode_info_context->mbmi.mode == I8X8_PRED) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;