Merge "Tokenization code cleanup." into experimental
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index a1dbd3e..d05be99 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -2105,11 +2105,10 @@
int nb_pad, uint8_t *token_cache, int c, int l) {
int eob = l;
assert(nb_pad == MAX_NEIGHBORS);
- if (c == eob - 1) {
+ if (c == eob) {
return 0;
} else {
int ctx;
- c++;
assert(neighbors[MAX_NEIGHBORS * c + 0] >= 0);
if (neighbors[MAX_NEIGHBORS * c + 1] >= 0) {
ctx = (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 061c279..673b35a 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -115,8 +115,6 @@
return SUBMVREF_NORMAL;
}
-const vp9_prob vp9_sub_mv_ref_prob [VP9_SUBMVREFS - 1] = { 180, 162, 25};
-
const vp9_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP9_SUBMVREFS - 1] = {
{ 147, 136, 18 },
{ 106, 145, 1 },
diff --git a/vp9/common/vp9_entropymode.h b/vp9/common/vp9_entropymode.h
index 345eb02..8b0caf6 100644
--- a/vp9/common/vp9_entropymode.h
+++ b/vp9/common/vp9_entropymode.h
@@ -34,8 +34,6 @@
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
-extern const vp9_prob vp9_sub_mv_ref_prob[VP9_SUBMVREFS - 1];
-
extern const vp9_prob vp9_sub_mv_ref_prob2[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
extern const unsigned int vp9_kf_default_bmode_counts[VP9_KF_BINTRAMODES]
diff --git a/vp9/common/vp9_entropymv.c b/vp9/common/vp9_entropymv.c
index 89dea4e..a4a9d54 100644
--- a/vp9/common/vp9_entropymv.c
+++ b/vp9/common/vp9_entropymv.c
@@ -116,11 +116,8 @@
}
int vp9_use_nmv_hp(const MV *ref) {
- if ((abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
- (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH)
- return 1;
- else
- return 0;
+ return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
+ (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
}
int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset) {
@@ -231,13 +228,13 @@
}
}
-void vp9_counts_process(nmv_context_counts *NMVcount, int usehp) {
- counts_to_context(&NMVcount->comps[0], usehp);
- counts_to_context(&NMVcount->comps[1], usehp);
+void vp9_counts_process(nmv_context_counts *nmv_count, int usehp) {
+ counts_to_context(&nmv_count->comps[0], usehp);
+ counts_to_context(&nmv_count->comps[1], usehp);
}
void vp9_counts_to_nmv_context(
- nmv_context_counts *NMVcount,
+ nmv_context_counts *nmv_count,
nmv_context *prob,
int usehp,
unsigned int (*branch_ct_joint)[2],
@@ -250,29 +247,29 @@
unsigned int (*branch_ct_class0_hp)[2],
unsigned int (*branch_ct_hp)[2]) {
int i, j, k;
- vp9_counts_process(NMVcount, usehp);
+ vp9_counts_process(nmv_count, usehp);
vp9_tree_probs_from_distribution(vp9_mv_joint_tree,
prob->joints,
branch_ct_joint,
- NMVcount->joints, 0);
+ nmv_count->joints, 0);
for (i = 0; i < 2; ++i) {
- prob->comps[i].sign = get_binary_prob(NMVcount->comps[i].sign[0],
- NMVcount->comps[i].sign[1]);
- branch_ct_sign[i][0] = NMVcount->comps[i].sign[0];
- branch_ct_sign[i][1] = NMVcount->comps[i].sign[1];
+ prob->comps[i].sign = get_binary_prob(nmv_count->comps[i].sign[0],
+ nmv_count->comps[i].sign[1]);
+ branch_ct_sign[i][0] = nmv_count->comps[i].sign[0];
+ branch_ct_sign[i][1] = nmv_count->comps[i].sign[1];
vp9_tree_probs_from_distribution(vp9_mv_class_tree,
prob->comps[i].classes,
branch_ct_classes[i],
- NMVcount->comps[i].classes, 0);
+ nmv_count->comps[i].classes, 0);
vp9_tree_probs_from_distribution(vp9_mv_class0_tree,
prob->comps[i].class0,
branch_ct_class0[i],
- NMVcount->comps[i].class0, 0);
+ nmv_count->comps[i].class0, 0);
for (j = 0; j < MV_OFFSET_BITS; ++j) {
- prob->comps[i].bits[j] = get_binary_prob(NMVcount->comps[i].bits[j][0],
- NMVcount->comps[i].bits[j][1]);
- branch_ct_bits[i][j][0] = NMVcount->comps[i].bits[j][0];
- branch_ct_bits[i][j][1] = NMVcount->comps[i].bits[j][1];
+ prob->comps[i].bits[j] = get_binary_prob(nmv_count->comps[i].bits[j][0],
+ nmv_count->comps[i].bits[j][1]);
+ branch_ct_bits[i][j][0] = nmv_count->comps[i].bits[j][0];
+ branch_ct_bits[i][j][1] = nmv_count->comps[i].bits[j][1];
}
}
for (i = 0; i < 2; ++i) {
@@ -280,25 +277,25 @@
vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
prob->comps[i].class0_fp[k],
branch_ct_class0_fp[i][k],
- NMVcount->comps[i].class0_fp[k], 0);
+ nmv_count->comps[i].class0_fp[k], 0);
}
vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
prob->comps[i].fp,
branch_ct_fp[i],
- NMVcount->comps[i].fp, 0);
+ nmv_count->comps[i].fp, 0);
}
if (usehp) {
for (i = 0; i < 2; ++i) {
prob->comps[i].class0_hp =
- get_binary_prob(NMVcount->comps[i].class0_hp[0],
- NMVcount->comps[i].class0_hp[1]);
- branch_ct_class0_hp[i][0] = NMVcount->comps[i].class0_hp[0];
- branch_ct_class0_hp[i][1] = NMVcount->comps[i].class0_hp[1];
+ get_binary_prob(nmv_count->comps[i].class0_hp[0],
+ nmv_count->comps[i].class0_hp[1]);
+ branch_ct_class0_hp[i][0] = nmv_count->comps[i].class0_hp[0];
+ branch_ct_class0_hp[i][1] = nmv_count->comps[i].class0_hp[1];
- prob->comps[i].hp = get_binary_prob(NMVcount->comps[i].hp[0],
- NMVcount->comps[i].hp[1]);
- branch_ct_hp[i][0] = NMVcount->comps[i].hp[0];
- branch_ct_hp[i][1] = NMVcount->comps[i].hp[1];
+ prob->comps[i].hp = get_binary_prob(nmv_count->comps[i].hp[0],
+ nmv_count->comps[i].hp[1]);
+ branch_ct_hp[i][0] = nmv_count->comps[i].hp[0];
+ branch_ct_hp[i][1] = nmv_count->comps[i].hp[1];
}
}
}
@@ -308,32 +305,26 @@
vp9_prob this_probs[],
const vp9_prob last_probs[],
const unsigned int num_events[]) {
- unsigned int left, right, weight;
vp9_prob this_prob;
- if (tree[i] <= 0) {
- left = num_events[-tree[i]];
- } else {
- left = adapt_probs(tree[i], tree, this_probs, last_probs,
- num_events);
- }
- if (tree[i + 1] <= 0) {
- right = num_events[-tree[i + 1]];
- } else {
- right = adapt_probs(tree[i + 1], tree, this_probs, last_probs,
- num_events);
- }
+ const uint32_t left = tree[i] <= 0
+ ? num_events[-tree[i]]
+ : adapt_probs(tree[i], tree, this_probs, last_probs, num_events);
- weight = left + right;
+ const uint32_t right = tree[i + 1] <= 0
+ ? num_events[-tree[i + 1]]
+ : adapt_probs(tree[i + 1], tree, this_probs, last_probs, num_events);
+
+ uint32_t weight = left + right;
if (weight) {
this_prob = get_binary_prob(left, right);
weight = weight > MV_COUNT_SAT ? MV_COUNT_SAT : weight;
- this_prob = weighted_prob(last_probs[i>>1], this_prob,
+ this_prob = weighted_prob(last_probs[i >> 1], this_prob,
MV_MAX_UPDATE_FACTOR * weight / MV_COUNT_SAT);
} else {
this_prob = last_probs[i >> 1];
}
- this_probs[i>>1] = this_prob;
+ this_probs[i >> 1] = this_prob;
return left + right;
}
diff --git a/vp9/common/vp9_extend.c b/vp9/common/vp9_extend.c
index d3e66f6..6aac905 100644
--- a/vp9/common/vp9_extend.c
+++ b/vp9/common/vp9_extend.c
@@ -11,159 +11,137 @@
#include "vp9/common/vp9_extend.h"
#include "vpx_mem/vpx_mem.h"
-static void copy_and_extend_plane(uint8_t *s, /* source */
- int sp, /* source pitch */
- uint8_t *d, /* destination */
- int dp, /* destination pitch */
- int h, /* height */
- int w, /* width */
- int et, /* extend top border */
- int el, /* extend left border */
- int eb, /* extend bottom border */
- int er) { /* extend right border */
- int i;
- uint8_t *src_ptr1, *src_ptr2;
- uint8_t *dest_ptr1, *dest_ptr2;
- int linesize;
+static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
+ uint8_t *dst, int dst_pitch,
+ int w, int h,
+ int extend_top, int extend_left,
+ int extend_bottom, int extend_right) {
+ int i, linesize;
- /* copy the left and right most columns out */
- src_ptr1 = s;
- src_ptr2 = s + w - 1;
- dest_ptr1 = d - el;
- dest_ptr2 = d + w;
+ // copy the left and right most columns out
+ const uint8_t *src_ptr1 = src;
+ const uint8_t *src_ptr2 = src + w - 1;
+ uint8_t *dst_ptr1 = dst - extend_left;
+ uint8_t *dst_ptr2 = dst + w;
for (i = 0; i < h; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], el);
- vpx_memcpy(dest_ptr1 + el, src_ptr1, w);
- vpx_memset(dest_ptr2, src_ptr2[0], er);
- src_ptr1 += sp;
- src_ptr2 += sp;
- dest_ptr1 += dp;
- dest_ptr2 += dp;
+ vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
+ vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+ vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+ src_ptr1 += src_pitch;
+ src_ptr2 += src_pitch;
+ dst_ptr1 += dst_pitch;
+ dst_ptr2 += dst_pitch;
}
- /* Now copy the top and bottom lines into each line of the respective
- * borders
- */
- src_ptr1 = d - el;
- src_ptr2 = d + dp * (h - 1) - el;
- dest_ptr1 = d + dp * (-et) - el;
- dest_ptr2 = d + dp * (h) - el;
- linesize = el + er + w;
+ // Now copy the top and bottom lines into each line of the respective
+ // borders
+ src_ptr1 = dst - extend_left;
+ src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
+ dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ linesize = extend_left + extend_right + w;
- for (i = 0; i < et; i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, linesize);
- dest_ptr1 += dp;
+ for (i = 0; i < extend_top; i++) {
+ vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+ dst_ptr1 += dst_pitch;
}
- for (i = 0; i < eb; i++) {
- vpx_memcpy(dest_ptr2, src_ptr2, linesize);
- dest_ptr2 += dp;
+ for (i = 0; i < extend_bottom; i++) {
+ vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+ dst_ptr2 += dst_pitch;
}
}
-void vp9_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst) {
- int et = dst->border;
- int el = dst->border;
- int eb = dst->border + dst->y_height - src->y_height;
- int er = dst->border + dst->y_width - src->y_width;
+ const int et_y = dst->border;
+ const int el_y = dst->border;
+ const int eb_y = dst->border + dst->y_height - src->y_height;
+ const int er_y = dst->border + dst->y_width - src->y_width;
+
+ const int et_uv = dst->border >> 1;
+ const int el_uv = dst->border >> 1;
+ const int eb_uv = (dst->border >> 1) + dst->uv_height - src->uv_height;
+ const int er_uv = (dst->border >> 1) + dst->uv_width - src->uv_width;
copy_and_extend_plane(src->y_buffer, src->y_stride,
dst->y_buffer, dst->y_stride,
- src->y_height, src->y_width,
- et, el, eb, er);
-
- et = dst->border >> 1;
- el = dst->border >> 1;
- eb = (dst->border >> 1) + dst->uv_height - src->uv_height;
- er = (dst->border >> 1) + dst->uv_width - src->uv_width;
+ src->y_width, src->y_height,
+ et_y, el_y, eb_y, er_y);
copy_and_extend_plane(src->u_buffer, src->uv_stride,
dst->u_buffer, dst->uv_stride,
- src->uv_height, src->uv_width,
- et, el, eb, er);
+ src->uv_width, src->uv_height,
+ et_uv, el_uv, eb_uv, er_uv);
copy_and_extend_plane(src->v_buffer, src->uv_stride,
dst->v_buffer, dst->uv_stride,
- src->uv_height, src->uv_width,
- et, el, eb, er);
+ src->uv_width, src->uv_height,
+                        et_uv, el_uv, eb_uv, er_uv);
}
-void vp9_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int srcy, int srcx,
int srch, int srcw) {
- int et = dst->border;
- int el = dst->border;
- int eb = dst->border + dst->y_height - src->y_height;
- int er = dst->border + dst->y_width - src->y_width;
- int src_y_offset = srcy * src->y_stride + srcx;
- int dst_y_offset = srcy * dst->y_stride + srcx;
- int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
- int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
-
// If the side is not touching the bounder then don't extend.
- if (srcy)
- et = 0;
- if (srcx)
- el = 0;
- if (srcy + srch != src->y_height)
- eb = 0;
- if (srcx + srcw != src->y_width)
- er = 0;
+ const int et_y = srcy ? 0 : dst->border;
+ const int el_y = srcx ? 0 : dst->border;
+ const int eb_y = srcy + srch != src->y_height ? 0 :
+ dst->border + dst->y_height - src->y_height;
+ const int er_y = srcx + srcw != src->y_width ? 0 :
+ dst->border + dst->y_width - src->y_width;
+ const int src_y_offset = srcy * src->y_stride + srcx;
+ const int dst_y_offset = srcy * dst->y_stride + srcx;
- copy_and_extend_plane(src->y_buffer + src_y_offset,
- src->y_stride,
- dst->y_buffer + dst_y_offset,
- dst->y_stride,
- srch, srcw,
- et, el, eb, er);
+ const int et_uv = (et_y + 1) >> 1;
+ const int el_uv = (el_y + 1) >> 1;
+ const int eb_uv = (eb_y + 1) >> 1;
+ const int er_uv = (er_y + 1) >> 1;
+ const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
+ const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
+ const int srch_uv = (srch + 1) >> 1;
+ const int srcw_uv = (srcw + 1) >> 1;
- et = (et + 1) >> 1;
- el = (el + 1) >> 1;
- eb = (eb + 1) >> 1;
- er = (er + 1) >> 1;
- srch = (srch + 1) >> 1;
- srcw = (srcw + 1) >> 1;
+ copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
+ dst->y_buffer + dst_y_offset, dst->y_stride,
+ srcw, srch,
+ et_y, el_y, eb_y, er_y);
- copy_and_extend_plane(src->u_buffer + src_uv_offset,
- src->uv_stride,
- dst->u_buffer + dst_uv_offset,
- dst->uv_stride,
- srch, srcw,
- et, el, eb, er);
+ copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
+ dst->u_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
- copy_and_extend_plane(src->v_buffer + src_uv_offset,
- src->uv_stride,
- dst->v_buffer + dst_uv_offset,
- dst->uv_stride,
- srch, srcw,
- et, el, eb, er);
+ copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
+ dst->v_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
}
-/* note the extension is only for the last row, for intra prediction purpose */
-void vp9_extend_mb_row(YV12_BUFFER_CONFIG *ybf, uint8_t *YPtr,
- uint8_t *UPtr, uint8_t *VPtr) {
+// note the extension is only for the last row, for intra prediction purpose
+void vp9_extend_mb_row(YV12_BUFFER_CONFIG *buf,
+ uint8_t *y, uint8_t *u, uint8_t *v) {
int i;
- YPtr += ybf->y_stride * 14;
- UPtr += ybf->uv_stride * 6;
- VPtr += ybf->uv_stride * 6;
+ y += buf->y_stride * 14;
+ u += buf->uv_stride * 6;
+ v += buf->uv_stride * 6;
for (i = 0; i < 4; i++) {
- YPtr[i] = YPtr[-1];
- UPtr[i] = UPtr[-1];
- VPtr[i] = VPtr[-1];
+ y[i] = y[-1];
+ u[i] = u[-1];
+ v[i] = v[-1];
}
- YPtr += ybf->y_stride;
- UPtr += ybf->uv_stride;
- VPtr += ybf->uv_stride;
+ y += buf->y_stride;
+ u += buf->uv_stride;
+ v += buf->uv_stride;
for (i = 0; i < 4; i++) {
- YPtr[i] = YPtr[-1];
- UPtr[i] = UPtr[-1];
- VPtr[i] = VPtr[-1];
+ y[i] = y[-1];
+ u[i] = u[-1];
+ v[i] = v[-1];
}
}
diff --git a/vp9/common/vp9_extend.h b/vp9/common/vp9_extend.h
index 847c2c5..6ec75c9 100644
--- a/vp9/common/vp9_extend.h
+++ b/vp9/common/vp9_extend.h
@@ -14,15 +14,17 @@
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
-void vp9_extend_mb_row(YV12_BUFFER_CONFIG *ybf, uint8_t *YPtr,
- uint8_t *UPtr, uint8_t *VPtr);
-void vp9_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst);
-void vp9_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int srcy, int srcx,
int srch, int srcw);
+void vp9_extend_mb_row(YV12_BUFFER_CONFIG *buf,
+ uint8_t *y, uint8_t *u, uint8_t *v);
+
+
#endif // VP9_COMMON_VP9_EXTEND_H_
diff --git a/vp9/common/vp9_filter.c b/vp9/common/vp9_filter.c
index 434c63e..6c1ea21 100644
--- a/vp9/common/vp9_filter.c
+++ b/vp9/common/vp9_filter.c
@@ -34,8 +34,9 @@
{ 0, 0, 0, 8, 120, 0, 0, 0 }
};
-#define FILTER_ALPHA 0
-#define FILTER_ALPHA_SHARP 1
+#define FILTER_ALPHA 0
+#define FILTER_ALPHA_SHARP 0
+#define FILTER_ALPHA_SMOOTH 50
DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8])
= {
#if FILTER_ALPHA == 0
@@ -81,12 +82,13 @@
{ 0, 3, -9, 27, 118, -13, 3, -1},
{ 0, 2, -6, 18, 122, -10, 2, 0},
{ 0, 1, -3, 8, 126, -5, 1, 0}
+
#endif /* FILTER_ALPHA */
};
DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8])
= {
-#if FILTER_ALPHA_SHARP == 1
+#if FILTER_ALPHA_SHARP == 0
/* dct based filter */
{0, 0, 0, 128, 0, 0, 0, 0},
{-1, 3, -7, 127, 8, -3, 1, 0},
@@ -105,24 +107,24 @@
{-1, 3, -6, 17, 125, -13, 5, -2},
{0, 1, -3, 8, 127, -7, 3, -1}
-#elif FILTER_ALPHA_SHARP == 75
- /* alpha = 0.75 */
- {0, 0, 0, 128, 0, 0, 0, 0},
- {-1, 2, -6, 126, 9, -3, 2, -1},
- {-1, 4, -11, 123, 18, -7, 3, -1},
- {-2, 6, -16, 119, 28, -10, 5, -2},
- {-2, 7, -19, 113, 38, -13, 6, -2},
- {-3, 8, -21, 106, 49, -16, 7, -2},
- {-3, 9, -22, 99, 59, -19, 8, -3},
- {-3, 9, -23, 90, 70, -21, 9, -3},
- {-3, 9, -22, 80, 80, -22, 9, -3},
- {-3, 9, -21, 70, 90, -23, 9, -3},
- {-3, 8, -19, 59, 99, -22, 9, -3},
- {-2, 7, -16, 49, 106, -21, 8, -3},
- {-2, 6, -13, 38, 113, -19, 7, -2},
- {-2, 5, -10, 28, 119, -16, 6, -2},
- {-1, 3, -7, 18, 123, -11, 4, -1},
- {-1, 2, -3, 9, 126, -6, 2, -1}
+#elif FILTER_ALPHA_SHARP == 80
+ /* alpha = 0.80 */
+ { 0, 0, 0, 128, 0, 0, 0, 0},
+ {-1, 2, -6, 127, 9, -4, 2, -1},
+ {-2, 5, -12, 124, 18, -7, 4, -2},
+ {-2, 7, -16, 119, 28, -11, 5, -2},
+ {-3, 8, -19, 114, 38, -14, 7, -3},
+ {-3, 9, -22, 107, 49, -17, 8, -3},
+ {-4, 10, -23, 99, 60, -20, 10, -4},
+ {-4, 11, -23, 90, 70, -22, 10, -4},
+ {-4, 11, -23, 80, 80, -23, 11, -4},
+ {-4, 10, -22, 70, 90, -23, 11, -4},
+ {-4, 10, -20, 60, 99, -23, 10, -4},
+ {-3, 8, -17, 49, 107, -22, 9, -3},
+ {-3, 7, -14, 38, 114, -19, 8, -3},
+ {-2, 5, -11, 28, 119, -16, 7, -2},
+ {-2, 4, -7, 18, 124, -12, 5, -2},
+ {-1, 2, -4, 9, 127, -6, 2, -1}
#endif /* FILTER_ALPHA_SHARP */
};
@@ -130,6 +132,8 @@
vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][8]) = {
/* 8-tap lowpass filter */
/* Hamming window */
+ /* freqmultiplier = 0.625 */
+#if FILTER_ALPHA_SMOOTH == 625
{-1, -7, 32, 80, 32, -7, -1, 0},
{-1, -8, 28, 80, 37, -7, -2, 1},
{ 0, -8, 24, 79, 41, -7, -2, 1},
@@ -146,6 +150,26 @@
{ 1, -3, -5, 45, 78, 20, -8, 0},
{ 1, -2, -7, 41, 79, 24, -8, 0},
{ 1, -2, -7, 37, 80, 28, -8, -1}
+
+#elif FILTER_ALPHA_SMOOTH == 50
+ /* freqmultiplier = 0.5 */
+ {-3, 0, 35, 64, 35, 0, -3, 0},
+ {-3, -1, 32, 64, 38, 1, -3, 0},
+ {-2, -2, 29, 63, 41, 2, -3, 0},
+ {-2, -2, 26, 63, 43, 4, -4, 0},
+ {-2, -3, 24, 62, 46, 5, -4, 0},
+ {-2, -3, 21, 60, 49, 7, -4, 0},
+ {-1, -4, 18, 59, 51, 9, -4, 0},
+ {-1, -4, 16, 57, 53, 12, -4, -1},
+ {-1, -4, 14, 55, 55, 14, -4, -1},
+ {-1, -4, 12, 53, 57, 16, -4, -1},
+ {0, -4, 9, 51, 59, 18, -4, -1},
+ {0, -4, 7, 49, 60, 21, -3, -2},
+ {0, -4, 5, 46, 62, 24, -3, -2},
+ {0, -4, 4, 43, 63, 26, -2, -2},
+ {0, -3, 2, 41, 63, 29, -2, -2},
+ {0, -3, 1, 38, 64, 32, -1, -3}
+#endif
};
DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_6[SUBPEL_SHIFTS][8])
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index d23530a..8d376ad 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -28,6 +28,18 @@
static int sb_ref_distance_weight[MVREF_NEIGHBOURS] =
{ 3, 3, 2, 2, 2, 1, 1, 1 };
+
+
+static int sb64_mv_ref_search[MVREF_NEIGHBOURS][2] = {
+ {0, -1}, {-1, 0}, {1, -1}, {-1, 1},
+ {2, -1}, {-1, 2}, {3, -1}, {-1,-1}
+};
+
+static int sb64_ref_distance_weight[MVREF_NEIGHBOURS] =
+ { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+
+
// clamp_mv_ref
#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
@@ -247,7 +259,10 @@
vpx_memset(candidate_mvs, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
vpx_memset(candidate_scores, 0, sizeof(candidate_scores));
- if (mbmi->sb_type) {
+ if (mbmi->sb_type == BLOCK_SIZE_SB64X64) {
+ mv_ref_search = sb64_mv_ref_search;
+ ref_distance_weight = sb64_ref_distance_weight;
+ } else if (mbmi->sb_type == BLOCK_SIZE_SB32X32) {
mv_ref_search = sb_mv_ref_search;
ref_distance_weight = sb_ref_distance_weight;
} else {
diff --git a/vp9/common/vp9_postproc.c b/vp9/common/vp9_postproc.c
index e59bc50..06dadfc 100644
--- a/vp9/common/vp9_postproc.c
+++ b/vp9/common/vp9_postproc.c
@@ -336,11 +336,8 @@
source->uv_height, source->uv_width, ppl);
}
-void vp9_de_noise(YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *post,
- int q,
- int low_var_thresh,
- int flag) {
+void vp9_denoise(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *post,
+ int q, int low_var_thresh, int flag) {
double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
int ppl = (int)(level + .5);
(void) post;
diff --git a/vp9/common/vp9_postproc.h b/vp9/common/vp9_postproc.h
index 0a637f0..c2f556e 100644
--- a/vp9/common/vp9_postproc.h
+++ b/vp9/common/vp9_postproc.h
@@ -29,8 +29,8 @@
int vp9_post_proc_frame(struct VP9Common *oci, YV12_BUFFER_CONFIG *dest,
vp9_ppflags_t *flags);
-void vp9_de_noise(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post,
- int q, int low_var_thresh, int flag);
+void vp9_denoise(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post,
+ int q, int low_var_thresh, int flag);
void vp9_deblock(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post,
int q, int low_var_thresh, int flag);
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 110af5e..a654c7d 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -1629,26 +1629,48 @@
}
}
-static int mv_pred_row(MACROBLOCKD *mb, int off, int idx) {
- int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.row +
- mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.row +
- mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.row +
- mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.row;
- return (temp < 0 ? temp - 4 : temp + 4) / 8;
+static INLINE int round_mv_comp(int value) {
+ return (value < 0 ? value - 4 : value + 4) / 8;
}
-static int mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
- int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.col +
- mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.col +
- mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.col +
- mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.col;
- return (temp < 0 ? temp - 4 : temp + 4) / 8;
+static int mi_mv_pred_row(MACROBLOCKD *mb, int off, int idx) {
+ const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.row;
+ return round_mv_comp(temp) & mb->fullpixel_mask;
}
+static int mi_mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
+ const int temp = mb->mode_info_context->bmi[off + 0].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 1].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 4].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[off + 5].as_mv[idx].as_mv.col;
+ return round_mv_comp(temp) & mb->fullpixel_mask;
+}
+
+static int b_mv_pred_row(MACROBLOCKD *mb, int off, int idx) {
+ BLOCKD *const blockd = mb->block;
+ const int temp = blockd[off + 0].bmi.as_mv[idx].as_mv.row +
+ blockd[off + 1].bmi.as_mv[idx].as_mv.row +
+ blockd[off + 4].bmi.as_mv[idx].as_mv.row +
+ blockd[off + 5].bmi.as_mv[idx].as_mv.row;
+ return round_mv_comp(temp) & mb->fullpixel_mask;
+}
+
+static int b_mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
+ BLOCKD *const blockd = mb->block;
+ const int temp = blockd[off + 0].bmi.as_mv[idx].as_mv.col +
+ blockd[off + 1].bmi.as_mv[idx].as_mv.col +
+ blockd[off + 4].bmi.as_mv[idx].as_mv.col +
+ blockd[off + 5].bmi.as_mv[idx].as_mv.col;
+ return round_mv_comp(temp) & mb->fullpixel_mask;
+}
+
+
static void build_4x4uvmvs(MACROBLOCKD *xd) {
int i, j;
BLOCKD *blockd = xd->block;
- const int mask = xd->fullpixel_mask;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
@@ -1658,8 +1680,8 @@
MV *u = &blockd[uoffset].bmi.as_mv[0].as_mv;
MV *v = &blockd[voffset].bmi.as_mv[0].as_mv;
- u->row = mv_pred_row(xd, yoffset, 0) & mask;
- u->col = mv_pred_col(xd, yoffset, 0) & mask;
+ u->row = mi_mv_pred_row(xd, yoffset, 0);
+ u->col = mi_mv_pred_col(xd, yoffset, 0);
// if (x->mode_info_context->mbmi.need_to_clamp_mvs)
clamp_uvmv_to_umv_border(u, xd);
@@ -1673,8 +1695,8 @@
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
u = &blockd[uoffset].bmi.as_mv[1].as_mv;
v = &blockd[voffset].bmi.as_mv[1].as_mv;
- u->row = mv_pred_row(xd, yoffset, 1) & mask;
- u->col = mv_pred_col(xd, yoffset, 1) & mask;
+ u->row = mi_mv_pred_row(xd, yoffset, 1);
+ u->col = mi_mv_pred_col(xd, yoffset, 1);
// if (mbmi->need_to_clamp_mvs)
clamp_uvmv_to_umv_border(u, xd);
@@ -1725,84 +1747,29 @@
/*encoder only*/
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- int i, j;
- int weight;
- BLOCKD *blockd = xd->block;
+ int mb_row, int mb_col) {
+ int i, j, weight;
+ BLOCKD *const blockd = xd->block;
/* build uv mvs */
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
- int yoffset = i * 8 + j * 2;
- int uoffset = 16 + i * 2 + j;
- int voffset = 20 + i * 2 + j;
- int temp;
+ const int yoffset = i * 8 + j * 2;
+ const int uoffset = 16 + i * 2 + j;
+ const int voffset = 20 + i * 2 + j;
- temp = blockd[yoffset ].bmi.as_mv[0].as_mv.row
- + blockd[yoffset + 1].bmi.as_mv[0].as_mv.row
- + blockd[yoffset + 4].bmi.as_mv[0].as_mv.row
- + blockd[yoffset + 5].bmi.as_mv[0].as_mv.row;
+ MV *u = &blockd[uoffset].bmi.as_mv[0].as_mv;
+ MV *v = &blockd[voffset].bmi.as_mv[0].as_mv;
- if (temp < 0)
- temp -= 4;
- else
- temp += 4;
-
- xd->block[uoffset].bmi.as_mv[0].as_mv.row = (temp / 8) &
- xd->fullpixel_mask;
-
- temp = blockd[yoffset ].bmi.as_mv[0].as_mv.col
- + blockd[yoffset + 1].bmi.as_mv[0].as_mv.col
- + blockd[yoffset + 4].bmi.as_mv[0].as_mv.col
- + blockd[yoffset + 5].bmi.as_mv[0].as_mv.col;
-
- if (temp < 0)
- temp -= 4;
- else
- temp += 4;
-
- blockd[uoffset].bmi.as_mv[0].as_mv.col = (temp / 8) &
- xd->fullpixel_mask;
-
- blockd[voffset].bmi.as_mv[0].as_mv.row =
- blockd[uoffset].bmi.as_mv[0].as_mv.row;
- blockd[voffset].bmi.as_mv[0].as_mv.col =
- blockd[uoffset].bmi.as_mv[0].as_mv.col;
+ v->row = u->row = b_mv_pred_row(xd, yoffset, 0);
+ v->col = u->col = b_mv_pred_col(xd, yoffset, 0);
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- temp = blockd[yoffset ].bmi.as_mv[1].as_mv.row
- + blockd[yoffset + 1].bmi.as_mv[1].as_mv.row
- + blockd[yoffset + 4].bmi.as_mv[1].as_mv.row
- + blockd[yoffset + 5].bmi.as_mv[1].as_mv.row;
+ u = &blockd[uoffset].bmi.as_mv[1].as_mv;
+ v = &blockd[voffset].bmi.as_mv[1].as_mv;
- if (temp < 0) {
- temp -= 4;
- } else {
- temp += 4;
- }
-
- blockd[uoffset].bmi.as_mv[1].as_mv.row = (temp / 8) &
- xd->fullpixel_mask;
-
- temp = blockd[yoffset ].bmi.as_mv[1].as_mv.col
- + blockd[yoffset + 1].bmi.as_mv[1].as_mv.col
- + blockd[yoffset + 4].bmi.as_mv[1].as_mv.col
- + blockd[yoffset + 5].bmi.as_mv[1].as_mv.col;
-
- if (temp < 0) {
- temp -= 4;
- } else {
- temp += 4;
- }
-
- blockd[uoffset].bmi.as_mv[1].as_mv.col = (temp / 8) &
- xd->fullpixel_mask;
-
- blockd[voffset].bmi.as_mv[1].as_mv.row =
- blockd[uoffset].bmi.as_mv[1].as_mv.row;
- blockd[voffset].bmi.as_mv[1].as_mv.col =
- blockd[uoffset].bmi.as_mv[1].as_mv.col;
+ v->row = u->row = b_mv_pred_row(xd, yoffset, 1);
+        v->col = u->col = b_mv_pred_col(xd, yoffset, 1);
}
}
}
diff --git a/vp9/common/vp9_seg_common.c b/vp9/common/vp9_seg_common.c
index 859e211..44d3172 100644
--- a/vp9/common/vp9_seg_common.c
+++ b/vp9/common/vp9_seg_common.c
@@ -51,7 +51,7 @@
}
int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
- return (segfeaturedata_signed[feature_id]);
+ return segfeaturedata_signed[feature_id];
}
void vp9_clear_segdata(MACROBLOCKD *xd,
diff --git a/vp9/decoder/vp9_dboolhuff.h b/vp9/decoder/vp9_dboolhuff.h
index eeb5c35..02ae1d3 100644
--- a/vp9/decoder/vp9_dboolhuff.h
+++ b/vp9/decoder/vp9_dboolhuff.h
@@ -88,34 +88,28 @@
int bit;
for (bit = bits - 1; bit >= 0; bit--) {
- z |= (decode_bool(br, 0x80) << bit);
+ z |= decode_bool(br, 0x80) << bit;
}
return z;
}
static int bool_error(BOOL_DECODER *br) {
- /* Check if we have reached the end of the buffer.
- *
- * Variable 'count' stores the number of bits in the 'value' buffer, minus
- * 8. The top byte is part of the algorithm, and the remainder is buffered
- * to be shifted into it. So if count == 8, the top 16 bits of 'value' are
- * occupied, 8 for the algorithm and 8 in the buffer.
- *
- * When reading a byte from the user's buffer, count is filled with 8 and
- * one byte is filled into the value buffer. When we reach the end of the
- * data, count is additionally filled with VP9_LOTS_OF_BITS. So when
- * count == VP9_LOTS_OF_BITS - 1, the user's data has been exhausted.
- */
- if ((br->count > VP9_BD_VALUE_SIZE) && (br->count < VP9_LOTS_OF_BITS)) {
- /* We have tried to decode bits after the end of
- * stream was encountered.
- */
- return 1;
- }
-
- /* No error. */
- return 0;
+ // Check if we have reached the end of the buffer.
+ //
+ // Variable 'count' stores the number of bits in the 'value' buffer, minus
+ // 8. The top byte is part of the algorithm, and the remainder is buffered
+ // to be shifted into it. So if count == 8, the top 16 bits of 'value' are
+ // occupied, 8 for the algorithm and 8 in the buffer.
+ //
+ // When reading a byte from the user's buffer, count is filled with 8 and
+ // one byte is filled into the value buffer. When we reach the end of the
+ // data, count is additionally filled with VP9_LOTS_OF_BITS. So when
+ // count == VP9_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ //
+ // 1 if we have tried to decode bits after the end of stream was encountered.
+ // 0 No error.
+ return br->count > VP9_BD_VALUE_SIZE && br->count < VP9_LOTS_OF_BITS;
}
int vp9_decode_unsigned_max(BOOL_DECODER *br, int max);
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 9cb1814..353e94f 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -490,7 +490,7 @@
int i, j;
for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
- cm->fc.switchable_interp_prob[j][i] = vp9_read_literal(bc, 8);
+ cm->fc.switchable_interp_prob[j][i] = vp9_read_prob(bc);
}
}
//printf("DECODER: %d %d\n", cm->fc.switchable_interp_prob[0],
@@ -511,13 +511,13 @@
#if CONFIG_COMP_INTERINTRA_PRED
if (cm->use_interintra) {
if (vp9_read(bc, VP9_UPD_INTERINTRA_PROB))
- cm->fc.interintra_prob = (vp9_prob)vp9_read_literal(bc, 8);
+ cm->fc.interintra_prob = vp9_read_prob(bc);
}
#endif
// Decode the baseline probabilities for decoding reference frame
- cm->prob_intra_coded = (vp9_prob)vp9_read_literal(bc, 8);
- cm->prob_last_coded = (vp9_prob)vp9_read_literal(bc, 8);
- cm->prob_gf_coded = (vp9_prob)vp9_read_literal(bc, 8);
+ cm->prob_intra_coded = vp9_read_prob(bc);
+ cm->prob_last_coded = vp9_read_prob(bc);
+ cm->prob_gf_coded = vp9_read_prob(bc);
// Computes a modified set of probabilities for use when reference
// frame prediction fails.
@@ -529,14 +529,14 @@
if (cm->comp_pred_mode == HYBRID_PREDICTION) {
int i;
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
- cm->prob_comppred[i] = (vp9_prob)vp9_read_literal(bc, 8);
+ cm->prob_comppred[i] = vp9_read_prob(bc);
}
if (vp9_read_bit(bc)) {
int i = 0;
do {
- cm->fc.ymode_prob[i] = (vp9_prob) vp9_read_literal(bc, 8);
+ cm->fc.ymode_prob[i] = vp9_read_prob(bc);
} while (++i < VP9_YMODES - 1);
}
@@ -544,7 +544,7 @@
int i = 0;
do {
- cm->fc.sb_ymode_prob[i] = (vp9_prob) vp9_read_literal(bc, 8);
+ cm->fc.sb_ymode_prob[i] = vp9_read_prob(bc);
} while (++i < VP9_I32X32_MODES - 1);
}
@@ -1141,7 +1141,7 @@
if (pbi->common.mb_no_coeff_skip) {
int k;
for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
- cm->mbskip_pred_probs[k] = (vp9_prob)vp9_read_literal(bc, 8);
+ cm->mbskip_pred_probs[k] = vp9_read_prob(bc);
}
}
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index ba75703..fea6433 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -44,7 +44,6 @@
int dec_debug = 0;
#endif
-
static int read_le16(const uint8_t *p) {
return (p[1] << 8) | p[0];
}
@@ -1278,61 +1277,51 @@
vp9_update_mode_info_in_image(cm, cm->mi);
}
-static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd,
- BOOL_DECODER *header_bc) {
+static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
int i, j;
- // Is segmentation enabled
- xd->segmentation_enabled = vp9_read_bit(header_bc);
-
+ xd->segmentation_enabled = vp9_read_bit(r);
if (xd->segmentation_enabled) {
// Read whether or not the segmentation map is being explicitly updated
// this frame.
- xd->update_mb_segmentation_map = vp9_read_bit(header_bc);
+ xd->update_mb_segmentation_map = vp9_read_bit(r);
// If so what method will be used.
if (xd->update_mb_segmentation_map) {
// Which macro block level features are enabled. Read the probs used to
// decode the segment id for each macro block.
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
- xd->mb_segment_tree_probs[i] = vp9_read_bit(header_bc) ?
- (vp9_prob)vp9_read_literal(header_bc, 8) : 255;
+ xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r) : 255;
}
// Read the prediction probs needed to decode the segment id
- pc->temporal_update = vp9_read_bit(header_bc);
+ pc->temporal_update = vp9_read_bit(r);
for (i = 0; i < PREDICTION_PROBS; i++) {
- if (pc->temporal_update) {
- pc->segment_pred_probs[i] = vp9_read_bit(header_bc) ?
- (vp9_prob)vp9_read_literal(header_bc, 8) : 255;
- } else {
- pc->segment_pred_probs[i] = 255;
- }
+ pc->segment_pred_probs[i] = pc->temporal_update
+ ? (vp9_read_bit(r) ? vp9_read_prob(r) : 255)
+ : 255;
}
if (pc->temporal_update) {
- int count[4];
const vp9_prob *p = xd->mb_segment_tree_probs;
vp9_prob *p_mod = xd->mb_segment_mispred_tree_probs;
+ const int c0 = p[0] * p[1];
+ const int c1 = p[0] * (256 - p[1]);
+ const int c2 = (256 - p[0]) * p[2];
+ const int c3 = (256 - p[0]) * (256 - p[2]);
- count[0] = p[0] * p[1];
- count[1] = p[0] * (256 - p[1]);
- count[2] = (256 - p[0]) * p[2];
- count[3] = (256 - p[0]) * (256 - p[2]);
-
- p_mod[0] = get_binary_prob(count[1], count[2] + count[3]);
- p_mod[1] = get_binary_prob(count[0], count[2] + count[3]);
- p_mod[2] = get_binary_prob(count[0] + count[1], count[3]);
- p_mod[3] = get_binary_prob(count[0] + count[1], count[2]);
+ p_mod[0] = get_binary_prob(c1, c2 + c3);
+ p_mod[1] = get_binary_prob(c0, c2 + c3);
+ p_mod[2] = get_binary_prob(c0 + c1, c3);
+ p_mod[3] = get_binary_prob(c0 + c1, c2);
}
}
- // Is the segment data being updated
- xd->update_mb_segmentation_data = vp9_read_bit(header_bc);
+ xd->update_mb_segmentation_data = vp9_read_bit(r);
if (xd->update_mb_segmentation_data) {
int data;
- xd->mb_segment_abs_delta = vp9_read_bit(header_bc);
+ xd->mb_segment_abs_delta = vp9_read_bit(r);
vp9_clearall_segfeatures(xd);
@@ -1341,16 +1330,15 @@
// For each of the segments features...
for (j = 0; j < SEG_LVL_MAX; j++) {
// Is the feature enabled
- if (vp9_read_bit(header_bc)) {
+ if (vp9_read_bit(r)) {
// Update the feature data and mask
vp9_enable_segfeature(xd, i, j);
- data = vp9_decode_unsigned_max(header_bc,
- vp9_seg_feature_data_max(j));
+ data = vp9_decode_unsigned_max(r, vp9_seg_feature_data_max(j));
// Is the segment data signed..
if (vp9_is_segfeature_signed(j)) {
- if (vp9_read_bit(header_bc))
+ if (vp9_read_bit(r))
data = -data;
}
} else {
@@ -1364,17 +1352,16 @@
}
}
-static void setup_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd,
- BOOL_DECODER *header_bc) {
+static void setup_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
int i;
- pc->filter_type = (LOOPFILTERTYPE) vp9_read_bit(header_bc);
- pc->filter_level = vp9_read_literal(header_bc, 6);
- pc->sharpness_level = vp9_read_literal(header_bc, 3);
+ pc->filter_type = (LOOPFILTERTYPE) vp9_read_bit(r);
+ pc->filter_level = vp9_read_literal(r, 6);
+ pc->sharpness_level = vp9_read_literal(r, 3);
#if CONFIG_LOOP_DERING
- if (vp9_read_bit(header_bc))
- pc->dering_enabled = 1 + vp9_read_literal(header_bc, 4);
+ if (vp9_read_bit(r))
+ pc->dering_enabled = 1 + vp9_read_literal(r, 4);
else
pc->dering_enabled = 0;
#endif
@@ -1382,31 +1369,31 @@
// Read in loop filter deltas applied at the MB level based on mode or ref
// frame.
xd->mode_ref_lf_delta_update = 0;
- xd->mode_ref_lf_delta_enabled = vp9_read_bit(header_bc);
+ xd->mode_ref_lf_delta_enabled = vp9_read_bit(r);
if (xd->mode_ref_lf_delta_enabled) {
// Do the deltas need to be updated
- xd->mode_ref_lf_delta_update = vp9_read_bit(header_bc);
+ xd->mode_ref_lf_delta_update = vp9_read_bit(r);
if (xd->mode_ref_lf_delta_update) {
// Send update
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
- if (vp9_read_bit(header_bc)) {
- // sign = vp9_read_bit( &header_bc );
- xd->ref_lf_deltas[i] = (signed char)vp9_read_literal(header_bc, 6);
+ if (vp9_read_bit(r)) {
+ // sign = vp9_read_bit(r);
+ xd->ref_lf_deltas[i] = vp9_read_literal(r, 6);
- if (vp9_read_bit(header_bc))
+ if (vp9_read_bit(r))
xd->ref_lf_deltas[i] = -xd->ref_lf_deltas[i]; // Apply sign
}
}
// Send update
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
- if (vp9_read_bit(header_bc)) {
- // sign = vp9_read_bit( &header_bc );
- xd->mode_lf_deltas[i] = (signed char)vp9_read_literal(header_bc, 6);
+ if (vp9_read_bit(r)) {
+ // sign = vp9_read_bit(r);
+ xd->mode_lf_deltas[i] = vp9_read_literal(r, 6);
- if (vp9_read_bit(header_bc))
+ if (vp9_read_bit(r))
xd->mode_lf_deltas[i] = -xd->mode_lf_deltas[i]; // Apply sign
}
}
@@ -1414,6 +1401,219 @@
}
}
+static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
+ const uint8_t *data,
+ const uint8_t *data_end) {
+ VP9_COMMON *const pc = &pbi->common;
+ const int width = pc->width;
+ const int height = pc->height;
+
+ // If error concealment is enabled we should only parse the new size
+ // if we have enough data. Otherwise we will end up with the wrong size.
+ if (scaling_active && data + 4 < data_end) {
+ pc->display_width = read_le16(data + 0);
+ pc->display_height = read_le16(data + 2);
+ data += 4;
+ }
+
+ if (data + 4 < data_end) {
+ pc->width = read_le16(data + 0);
+ pc->height = read_le16(data + 2);
+ data += 4;
+ }
+
+ if (!scaling_active) {
+ pc->display_width = pc->width;
+ pc->display_height = pc->height;
+ }
+
+ if (width != pc->width || height != pc->height) {
+ if (pc->width <= 0) {
+ pc->width = width;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame width");
+ }
+
+ if (pc->height <= 0) {
+ pc->height = height;
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame height");
+ }
+
+ if (!pbi->initial_width || !pbi->initial_height) {
+ if (vp9_alloc_frame_buffers(pc, pc->width, pc->height))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+ pbi->initial_width = pc->width;
+ pbi->initial_height = pc->height;
+ }
+
+ if (pc->width > pbi->initial_width) {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame width too large");
+ }
+
+ if (pc->height > pbi->initial_height) {
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame height too large");
+ }
+
+ update_frame_size(pbi);
+ }
+
+ return data;
+}
+
+static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
+ FRAME_CONTEXT *const fc = &pbi->common.fc;
+
+ vp9_copy(fc->pre_coef_probs_4x4, fc->coef_probs_4x4);
+ vp9_copy(fc->pre_coef_probs_8x8, fc->coef_probs_8x8);
+ vp9_copy(fc->pre_coef_probs_16x16, fc->coef_probs_16x16);
+ vp9_copy(fc->pre_coef_probs_32x32, fc->coef_probs_32x32);
+ vp9_copy(fc->pre_ymode_prob, fc->ymode_prob);
+ vp9_copy(fc->pre_sb_ymode_prob, fc->sb_ymode_prob);
+ vp9_copy(fc->pre_uv_mode_prob, fc->uv_mode_prob);
+ vp9_copy(fc->pre_bmode_prob, fc->bmode_prob);
+ vp9_copy(fc->pre_i8x8_mode_prob, fc->i8x8_mode_prob);
+ vp9_copy(fc->pre_sub_mv_ref_prob, fc->sub_mv_ref_prob);
+ vp9_copy(fc->pre_mbsplit_prob, fc->mbsplit_prob);
+ fc->pre_nmvc = fc->nmvc;
+
+ vp9_zero(fc->coef_counts_4x4);
+ vp9_zero(fc->coef_counts_8x8);
+ vp9_zero(fc->coef_counts_16x16);
+ vp9_zero(fc->coef_counts_32x32);
+ vp9_zero(fc->eob_branch_counts);
+ vp9_zero(fc->ymode_counts);
+ vp9_zero(fc->sb_ymode_counts);
+ vp9_zero(fc->uv_mode_counts);
+ vp9_zero(fc->bmode_counts);
+ vp9_zero(fc->i8x8_mode_counts);
+ vp9_zero(fc->sub_mv_ref_counts);
+ vp9_zero(fc->mbsplit_counts);
+ vp9_zero(fc->NMVcount);
+ vp9_zero(fc->mv_ref_ct);
+
+#if CONFIG_COMP_INTERINTRA_PRED
+ fc->pre_interintra_prob = fc->interintra_prob;
+ vp9_zero(fc->interintra_counts);
+#endif
+
+#if CONFIG_CODE_NONZEROCOUNT
+ vp9_copy(fc->pre_nzc_probs_4x4, fc->nzc_probs_4x4);
+ vp9_copy(fc->pre_nzc_probs_8x8, fc->nzc_probs_8x8);
+ vp9_copy(fc->pre_nzc_probs_16x16, fc->nzc_probs_16x16);
+ vp9_copy(fc->pre_nzc_probs_32x32, fc->nzc_probs_32x32);
+ vp9_copy(fc->pre_nzc_pcat_probs, fc->nzc_pcat_probs);
+
+ vp9_zero(fc->nzc_counts_4x4);
+ vp9_zero(fc->nzc_counts_8x8);
+ vp9_zero(fc->nzc_counts_16x16);
+ vp9_zero(fc->nzc_counts_32x32);
+ vp9_zero(fc->nzc_pcat_counts);
+#endif
+
+ read_coef_probs(pbi, r);
+#if CONFIG_CODE_NONZEROCOUNT
+ read_nzc_probs(&pbi->common, r);
+#endif
+}
+
+static void decode_tiles(VP9D_COMP *pbi,
+ const uint8_t *data, int first_partition_size,
+ BOOL_DECODER *header_bc, BOOL_DECODER *residual_bc) {
+ VP9_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ const uint8_t *data_ptr = data + first_partition_size;
+ int tile_row, tile_col, delta_log2_tiles;
+ int mb_row;
+
+ vp9_get_tile_n_bits(pc, &pc->log2_tile_columns, &delta_log2_tiles);
+ while (delta_log2_tiles--) {
+ if (vp9_read_bit(header_bc)) {
+ pc->log2_tile_columns++;
+ } else {
+ break;
+ }
+ }
+ pc->log2_tile_rows = vp9_read_bit(header_bc);
+ if (pc->log2_tile_rows)
+ pc->log2_tile_rows += vp9_read_bit(header_bc);
+ pc->tile_columns = 1 << pc->log2_tile_columns;
+ pc->tile_rows = 1 << pc->log2_tile_rows;
+
+ vpx_memset(pc->above_context, 0,
+ sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
+
+ if (pbi->oxcf.inv_tile_order) {
+ const int n_cols = pc->tile_columns;
+ const uint8_t *data_ptr2[4][1 << 6];
+ BOOL_DECODER UNINITIALIZED_IS_SAFE(bc_bak);
+
+ // pre-initialize the offsets, we're going to read in inverse order
+ data_ptr2[0][0] = data_ptr;
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ if (tile_row) {
+ const int size = read_le32(data_ptr2[tile_row - 1][n_cols - 1]);
+ data_ptr2[tile_row - 1][n_cols - 1] += 4;
+ data_ptr2[tile_row][0] = data_ptr2[tile_row - 1][n_cols - 1] + size;
+ }
+
+ for (tile_col = 1; tile_col < n_cols; tile_col++) {
+ const int size = read_le32(data_ptr2[tile_row][tile_col - 1]);
+ data_ptr2[tile_row][tile_col - 1] += 4;
+ data_ptr2[tile_row][tile_col] =
+ data_ptr2[tile_row][tile_col - 1] + size;
+ }
+ }
+
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(pc, tile_row);
+ for (tile_col = n_cols - 1; tile_col >= 0; tile_col--) {
+ vp9_get_tile_col_offsets(pc, tile_col);
+ setup_token_decoder(pbi, data_ptr2[tile_row][tile_col], residual_bc);
+
+ // Decode a row of superblocks
+ for (mb_row = pc->cur_tile_mb_row_start;
+ mb_row < pc->cur_tile_mb_row_end; mb_row += 4) {
+ decode_sb_row(pbi, pc, mb_row, xd, residual_bc);
+ }
+
+ if (tile_row == pc->tile_rows - 1 && tile_col == n_cols - 1)
+ bc_bak = *residual_bc;
+ }
+ }
+ *residual_bc = bc_bak;
+ } else {
+ int has_more;
+
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(pc, tile_row);
+ for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
+ vp9_get_tile_col_offsets(pc, tile_col);
+
+ has_more = tile_col < pc->tile_columns - 1 ||
+ tile_row < pc->tile_rows - 1;
+
+ // Setup decoder
+ setup_token_decoder(pbi, data_ptr + (has_more ? 4 : 0), residual_bc);
+
+ // Decode a row of superblocks
+ for (mb_row = pc->cur_tile_mb_row_start;
+ mb_row < pc->cur_tile_mb_row_end; mb_row += 4) {
+ decode_sb_row(pbi, pc, mb_row, xd, residual_bc);
+ }
+
+ if (has_more) {
+ const int size = read_le32(data_ptr);
+ data_ptr += 4 + size;
+ }
+ }
+ }
+ }
+}
int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
BOOL_DECODER header_bc, residual_bc;
@@ -1422,11 +1622,11 @@
const uint8_t *data = (const uint8_t *)pbi->Source;
const uint8_t *data_end = data + pbi->source_sz;
ptrdiff_t first_partition_length_in_bytes = 0;
- int mb_row, i, corrupt_tokens = 0;
+ int i, corrupt_tokens = 0;
// printf("Decoding frame %d\n", pc->current_video_frame);
- /* start with no corruption of current frame */
- xd->corrupted = 0;
+
+ xd->corrupted = 0; // start with no corruption of current frame
pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
if (data_end - data < 3) {
@@ -1449,10 +1649,8 @@
vp9_setup_version(pc);
if (pc->frame_type == KEY_FRAME) {
- /* vet via sync code */
- /* When error concealment is enabled we should only check the sync
- * code if we have enough bits available
- */
+ // When error concealment is enabled we should only check the sync
+ // code if we have enough bits available
if (data + 3 < data_end) {
if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
@@ -1460,63 +1658,8 @@
}
data += 3;
}
- {
- const int width = pc->width;
- const int height = pc->height;
- /* If error concealment is enabled we should only parse the new size
- * if we have enough data. Otherwise we will end up with the wrong
- * size.
- */
- if (scaling_active && data + 4 < data_end) {
- pc->display_width = read_le16(data + 0);
- pc->display_height = read_le16(data + 2);
- data += 4;
- }
- if (data + 4 < data_end) {
- pc->width = read_le16(data + 0);
- pc->height = read_le16(data + 2);
- data += 4;
- }
- if (!scaling_active) {
- pc->display_width = pc->width;
- pc->display_height = pc->height;
- }
-
- if (width != pc->width || height != pc->height) {
- if (pc->width <= 0) {
- pc->width = width;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame width");
- }
-
- if (pc->height <= 0) {
- pc->height = height;
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid frame height");
- }
-
- if (!pbi->initial_width || !pbi->initial_height) {
- if (vp9_alloc_frame_buffers(pc, pc->width, pc->height))
- vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate frame buffers");
- pbi->initial_width = pc->width;
- pbi->initial_height = pc->height;
- }
-
- if (pc->width > pbi->initial_width) {
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Frame width too large");
- }
-
- if (pc->height > pbi->initial_height) {
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Frame height too large");
- }
-
- update_frame_size(pbi);
- }
- }
+ data = setup_frame_size(pbi, scaling_active, data, data_end);
}
if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME) ||
@@ -1526,7 +1669,7 @@
init_frame(pbi);
- /* Reset the frame pointers to the current frame size */
+ // Reset the frame pointers to the current frame size
vp8_yv12_realloc_frame_buffer(&pc->yv12_fb[pc->new_fb_idx],
pc->width, pc->height,
VP9BORDERINPIXELS);
@@ -1535,9 +1678,9 @@
(unsigned int)first_partition_length_in_bytes))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
- pc->clr_type = (YUV_TYPE)vp9_read_bit(&header_bc);
- pc->clamp_type = (CLAMP_TYPE)vp9_read_bit(&header_bc);
+ pc->clr_type = (YUV_TYPE)vp9_read_bit(&header_bc);
+ pc->clamp_type = (CLAMP_TYPE)vp9_read_bit(&header_bc);
pc->error_resilient_mode = vp9_read_bit(&header_bc);
setup_segmentation(pc, xd, &header_bc);
@@ -1552,25 +1695,25 @@
} else {
for (i = 0; i < PREDICTION_PROBS; i++) {
if (vp9_read_bit(&header_bc))
- pc->ref_pred_probs[i] = (vp9_prob)vp9_read_literal(&header_bc, 8);
+ pc->ref_pred_probs[i] = vp9_read_prob(&header_bc);
}
}
- pc->sb64_coded = vp9_read_literal(&header_bc, 8);
- pc->sb32_coded = vp9_read_literal(&header_bc, 8);
+ pc->sb64_coded = vp9_read_prob(&header_bc);
+ pc->sb32_coded = vp9_read_prob(&header_bc);
xd->lossless = vp9_read_bit(&header_bc);
if (xd->lossless) {
pc->txfm_mode = ONLY_4X4;
} else {
// Read the loop filter level and type
pc->txfm_mode = vp9_read_literal(&header_bc, 2);
- if (pc->txfm_mode == 3)
+ if (pc->txfm_mode == ALLOW_32X32)
pc->txfm_mode += vp9_read_bit(&header_bc);
if (pc->txfm_mode == TX_MODE_SELECT) {
- pc->prob_tx[0] = vp9_read_literal(&header_bc, 8);
- pc->prob_tx[1] = vp9_read_literal(&header_bc, 8);
- pc->prob_tx[2] = vp9_read_literal(&header_bc, 8);
+ pc->prob_tx[0] = vp9_read_prob(&header_bc);
+ pc->prob_tx[1] = vp9_read_prob(&header_bc);
+ pc->prob_tx[2] = vp9_read_prob(&header_bc);
}
}
@@ -1596,22 +1739,20 @@
mb_init_dequantizer(pbi, &pbi->mb);
}
- /* Determine if the golden frame or ARF buffer should be updated and how.
- * For all non key frames the GF and ARF refresh flags and sign bias
- * flags must be set explicitly.
- */
+ // Determine if the golden frame or ARF buffer should be updated and how.
+ // For all non key frames the GF and ARF refresh flags and sign bias
+ // flags must be set explicitly.
if (pc->frame_type == KEY_FRAME) {
pc->active_ref_idx[0] = pc->new_fb_idx;
pc->active_ref_idx[1] = pc->new_fb_idx;
pc->active_ref_idx[2] = pc->new_fb_idx;
} else {
- /* Should the GF or ARF be updated from the current frame */
+ // Should the GF or ARF be updated from the current frame
pbi->refresh_frame_flags = vp9_read_literal(&header_bc, NUM_REF_FRAMES);
- /* Select active reference frames */
+ // Select active reference frames
for (i = 0; i < 3; i++) {
int ref_frame_num = vp9_read_literal(&header_bc, NUM_REF_FRAMES_LG2);
-
pc->active_ref_idx[i] = pc->ref_frame_map[ref_frame_num];
}
@@ -1619,16 +1760,17 @@
pc->ref_frame_sign_bias[ALTREF_FRAME] = vp9_read_bit(&header_bc);
// Is high precision mv allowed
- xd->allow_high_precision_mv = (unsigned char)vp9_read_bit(&header_bc);
+ xd->allow_high_precision_mv = vp9_read_bit(&header_bc);
// Read the type of subpel filter to use
- pc->mcomp_filter_type = vp9_read_bit(&header_bc) ? SWITCHABLE :
- vp9_read_literal(&header_bc, 2);
+ pc->mcomp_filter_type = vp9_read_bit(&header_bc)
+ ? SWITCHABLE
+ : vp9_read_literal(&header_bc, 2);
#if CONFIG_COMP_INTERINTRA_PRED
pc->use_interintra = vp9_read_bit(&header_bc);
#endif
- /* To enable choice of different interploation filters */
+  // To enable choice of different interpolation filters
vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
}
@@ -1649,8 +1791,7 @@
for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
for (j = 0; j < 4; j++) {
if (vp9_read(&header_bc, 252)) {
- pc->fc.vp9_mode_contexts[i][j] =
- (vp9_prob)vp9_read_literal(&header_bc, 8);
+ pc->fc.vp9_mode_contexts[i][j] = vp9_read_prob(&header_bc);
}
}
}
@@ -1675,8 +1816,7 @@
// Read any updates to probabilities
for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j) {
if (vp9_read(&header_bc, VP9_MVREF_UPDATE_PROB)) {
- xd->mb_mv_ref_probs[i][j] =
- (vp9_prob)vp9_read_literal(&header_bc, 8);
+ xd->mb_mv_ref_probs[i][j] = vp9_read_prob(&header_bc);
}
}
}
@@ -1693,69 +1833,9 @@
fclose(z);
}
- vp9_copy(pbi->common.fc.pre_coef_probs_4x4,
- pbi->common.fc.coef_probs_4x4);
- vp9_copy(pbi->common.fc.pre_coef_probs_8x8,
- pbi->common.fc.coef_probs_8x8);
- vp9_copy(pbi->common.fc.pre_coef_probs_16x16,
- pbi->common.fc.coef_probs_16x16);
- vp9_copy(pbi->common.fc.pre_coef_probs_32x32,
- pbi->common.fc.coef_probs_32x32);
- vp9_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
- vp9_copy(pbi->common.fc.pre_sb_ymode_prob, pbi->common.fc.sb_ymode_prob);
- vp9_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
- vp9_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob);
- vp9_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob);
- vp9_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob);
- vp9_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob);
-#if CONFIG_COMP_INTERINTRA_PRED
- pbi->common.fc.pre_interintra_prob = pbi->common.fc.interintra_prob;
-#endif
- pbi->common.fc.pre_nmvc = pbi->common.fc.nmvc;
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(pbi->common.fc.pre_nzc_probs_4x4,
- pbi->common.fc.nzc_probs_4x4);
- vp9_copy(pbi->common.fc.pre_nzc_probs_8x8,
- pbi->common.fc.nzc_probs_8x8);
- vp9_copy(pbi->common.fc.pre_nzc_probs_16x16,
- pbi->common.fc.nzc_probs_16x16);
- vp9_copy(pbi->common.fc.pre_nzc_probs_32x32,
- pbi->common.fc.nzc_probs_32x32);
- vp9_copy(pbi->common.fc.pre_nzc_pcat_probs,
- pbi->common.fc.nzc_pcat_probs);
-#endif
+ update_frame_context(pbi, &header_bc);
- vp9_zero(pbi->common.fc.coef_counts_4x4);
- vp9_zero(pbi->common.fc.coef_counts_8x8);
- vp9_zero(pbi->common.fc.coef_counts_16x16);
- vp9_zero(pbi->common.fc.coef_counts_32x32);
- vp9_zero(pbi->common.fc.eob_branch_counts);
- vp9_zero(pbi->common.fc.ymode_counts);
- vp9_zero(pbi->common.fc.sb_ymode_counts);
- vp9_zero(pbi->common.fc.uv_mode_counts);
- vp9_zero(pbi->common.fc.bmode_counts);
- vp9_zero(pbi->common.fc.i8x8_mode_counts);
- vp9_zero(pbi->common.fc.sub_mv_ref_counts);
- vp9_zero(pbi->common.fc.mbsplit_counts);
- vp9_zero(pbi->common.fc.NMVcount);
- vp9_zero(pbi->common.fc.mv_ref_ct);
-#if CONFIG_COMP_INTERINTRA_PRED
- vp9_zero(pbi->common.fc.interintra_counts);
-#endif
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_zero(pbi->common.fc.nzc_counts_4x4);
- vp9_zero(pbi->common.fc.nzc_counts_8x8);
- vp9_zero(pbi->common.fc.nzc_counts_16x16);
- vp9_zero(pbi->common.fc.nzc_counts_32x32);
- vp9_zero(pbi->common.fc.nzc_pcat_counts);
-#endif
-
- read_coef_probs(pbi, &header_bc);
-#if CONFIG_CODE_NONZEROCOUNT
- read_nzc_probs(&pbi->common, &header_bc);
-#endif
-
- /* Initialize xd pointers. Any reference should do for xd->pre, so use 0. */
+ // Initialize xd pointers. Any reference should do for xd->pre, so use 0.
vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->active_ref_idx[0]],
sizeof(YV12_BUFFER_CONFIG));
vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx],
@@ -1781,91 +1861,8 @@
vp9_decode_mode_mvs_init(pbi, &header_bc);
- /* tile info */
- {
- const uint8_t *data_ptr = data + first_partition_length_in_bytes;
- int tile_row, tile_col, delta_log2_tiles;
-
- vp9_get_tile_n_bits(pc, &pc->log2_tile_columns, &delta_log2_tiles);
- while (delta_log2_tiles--) {
- if (vp9_read_bit(&header_bc)) {
- pc->log2_tile_columns++;
- } else {
- break;
- }
- }
- pc->log2_tile_rows = vp9_read_bit(&header_bc);
- if (pc->log2_tile_rows)
- pc->log2_tile_rows += vp9_read_bit(&header_bc);
- pc->tile_columns = 1 << pc->log2_tile_columns;
- pc->tile_rows = 1 << pc->log2_tile_rows;
-
- vpx_memset(pc->above_context, 0,
- sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
-
- if (pbi->oxcf.inv_tile_order) {
- const int n_cols = pc->tile_columns;
- const uint8_t *data_ptr2[4][1 << 6];
- BOOL_DECODER UNINITIALIZED_IS_SAFE(bc_bak);
-
- // pre-initialize the offsets, we're going to read in inverse order
- data_ptr2[0][0] = data_ptr;
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- if (tile_row) {
- const int size = read_le32(data_ptr2[tile_row - 1][n_cols - 1]);
- data_ptr2[tile_row - 1][n_cols - 1] += 4;
- data_ptr2[tile_row][0] = data_ptr2[tile_row - 1][n_cols - 1] + size;
- }
-
- for (tile_col = 1; tile_col < n_cols; tile_col++) {
- const int size = read_le32(data_ptr2[tile_row][tile_col - 1]);
- data_ptr2[tile_row][tile_col - 1] += 4;
- data_ptr2[tile_row][tile_col] =
- data_ptr2[tile_row][tile_col - 1] + size;
- }
- }
-
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- vp9_get_tile_row_offsets(pc, tile_row);
- for (tile_col = n_cols - 1; tile_col >= 0; tile_col--) {
- vp9_get_tile_col_offsets(pc, tile_col);
- setup_token_decoder(pbi, data_ptr2[tile_row][tile_col], &residual_bc);
-
- /* Decode a row of superblocks */
- for (mb_row = pc->cur_tile_mb_row_start;
- mb_row < pc->cur_tile_mb_row_end; mb_row += 4) {
- decode_sb_row(pbi, pc, mb_row, xd, &residual_bc);
- }
- if (tile_row == pc->tile_rows - 1 && tile_col == n_cols - 1)
- bc_bak = residual_bc;
- }
- }
- residual_bc = bc_bak;
- } else {
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- vp9_get_tile_row_offsets(pc, tile_row);
- for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
- vp9_get_tile_col_offsets(pc, tile_col);
-
- if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1)
- setup_token_decoder(pbi, data_ptr + 4, &residual_bc);
- else
- setup_token_decoder(pbi, data_ptr, &residual_bc);
-
- /* Decode a row of superblocks */
- for (mb_row = pc->cur_tile_mb_row_start;
- mb_row < pc->cur_tile_mb_row_end; mb_row += 4) {
- decode_sb_row(pbi, pc, mb_row, xd, &residual_bc);
- }
-
- if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1) {
- int size = read_le32(data_ptr);
- data_ptr += 4 + size;
- }
- }
- }
- }
- }
+ decode_tiles(pbi, data, first_partition_length_in_bytes,
+ &header_bc, &residual_bc);
corrupt_tokens |= xd->corrupted;
// keep track of the last coded dimensions
@@ -1917,8 +1914,8 @@
#endif
/* Find the end of the coded buffer */
- while (residual_bc.count > CHAR_BIT
- && residual_bc.count < VP9_BD_VALUE_SIZE) {
+ while (residual_bc.count > CHAR_BIT &&
+ residual_bc.count < VP9_BD_VALUE_SIZE) {
residual_bc.count -= CHAR_BIT;
residual_bc.user_buffer--;
}
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index a891ddd..cb3038e 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -70,7 +70,7 @@
[pt][token]++; \
token_cache[c] = token; \
pt = vp9_get_coef_context(scan, nb, pad, token_cache, \
- c, default_eob); \
+ c + 1, default_eob); \
} while (0)
#if CONFIG_CODE_NONZEROCOUNT
diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c
index bcb5897..2b61f0a 100644
--- a/vp9/decoder/vp9_onyxd_if.c
+++ b/vp9/decoder/vp9_onyxd_if.c
@@ -154,8 +154,8 @@
if (!pbi)
return;
- // Delete sementation map
- if (pbi->common.last_frame_seg_map != 0)
+ // Delete segmentation map
+ if (pbi->common.last_frame_seg_map)
vpx_free(pbi->common.last_frame_seg_map);
vp9_remove_common(&pbi->common);
@@ -163,6 +163,10 @@
vpx_free(pbi);
}
+static int equal_dimensions(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) {
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+}
vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr,
VP9_REFFRAME ref_frame_flag,
@@ -176,22 +180,20 @@
* vpxenc --test-decode functionality working, and will be replaced in a
* later commit that adds VP9-specific controls for this functionality.
*/
- if (ref_frame_flag == VP9_LAST_FLAG)
+ if (ref_frame_flag == VP9_LAST_FLAG) {
ref_fb_idx = pbi->common.ref_frame_map[0];
- else {
+ } else {
vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
"Invalid reference frame");
return pbi->common.error.error_code;
}
- if (cm->yv12_fb[ref_fb_idx].y_height != sd->y_height ||
- cm->yv12_fb[ref_fb_idx].y_width != sd->y_width ||
- cm->yv12_fb[ref_fb_idx].uv_height != sd->uv_height ||
- cm->yv12_fb[ref_fb_idx].uv_width != sd->uv_width) {
+ if (!equal_dimensions(&cm->yv12_fb[ref_fb_idx], sd)) {
vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
"Incorrect buffer dimensions");
- } else
+ } else {
vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+ }
return pbi->common.error.error_code;
}
@@ -202,7 +204,6 @@
VP9D_COMP *pbi = (VP9D_COMP *) ptr;
VP9_COMMON *cm = &pbi->common;
int *ref_fb_ptr = NULL;
- int free_fb;
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
* encoder is using the frame buffers for. This is just a stub to keep the
@@ -221,20 +222,17 @@
return pbi->common.error.error_code;
}
- if (cm->yv12_fb[*ref_fb_ptr].y_height != sd->y_height ||
- cm->yv12_fb[*ref_fb_ptr].y_width != sd->y_width ||
- cm->yv12_fb[*ref_fb_ptr].uv_height != sd->uv_height ||
- cm->yv12_fb[*ref_fb_ptr].uv_width != sd->uv_width) {
+ if (!equal_dimensions(&cm->yv12_fb[*ref_fb_ptr], sd)) {
vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
"Incorrect buffer dimensions");
} else {
- /* Find an empty frame buffer. */
- free_fb = get_free_fb(cm);
- /* Decrease fb_idx_ref_cnt since it will be increased again in
- * ref_cnt_fb() below. */
+ // Find an empty frame buffer.
+ const int free_fb = get_free_fb(cm);
+ // Decrease fb_idx_ref_cnt since it will be increased again in
+ // ref_cnt_fb() below.
cm->fb_idx_ref_cnt[free_fb]--;
- /* Manage the reference counters and copy image. */
+ // Manage the reference counters and copy image.
ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb);
vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]);
}
diff --git a/vp9/decoder/vp9_treereader.h b/vp9/decoder/vp9_treereader.h
index 305dfe5..4ec6de9 100644
--- a/vp9/decoder/vp9_treereader.h
+++ b/vp9/decoder/vp9_treereader.h
@@ -19,10 +19,10 @@
#define vp9_read decode_bool
#define vp9_read_literal decode_value
-#define vp9_read_bit(R) vp9_read(R, vp9_prob_half)
+#define vp9_read_bit(r) vp9_read(r, vp9_prob_half)
+#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
-/* Intent of tree data structure is to make decoding trivial. */
-
+// Intent of tree data structure is to make decoding trivial.
static int treed_read(vp9_reader *const r, /* !!! must return a 0 or 1 !!! */
vp9_tree t,
const vp9_prob *const p) {
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index ec72923..7128b70 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -1339,6 +1339,7 @@
void update_nzcstats(VP9_COMMON *const cm) {
int c, r, b, t;
+
for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
for (r = 0; r < REF_TYPES; ++r) {
for (b = 0; b < BLOCK_TYPES; ++b) {
@@ -1388,6 +1389,8 @@
void print_nzcstats() {
int c, r, b, t;
+ FILE *f;
+
printf(
"static const unsigned int default_nzc_counts_4x4[MAX_NZC_CONTEXTS]\n"
" [REF_TYPES]\n"
@@ -1508,11 +1511,9 @@
for (b = 0; b < BLOCK_TYPES; ++b) {
vp9_prob probs[NZC4X4_NODES];
unsigned int branch_ct[NZC4X4_NODES][2];
- vp9_tree_probs_from_distribution(NZC4X4_TOKENS,
- vp9_nzc4x4_encodings,
- vp9_nzc4x4_tree,
+ vp9_tree_probs_from_distribution(vp9_nzc4x4_tree,
probs, branch_ct,
- nzc_stats_4x4[c][r][b]);
+ nzc_stats_4x4[c][r][b], 0);
printf(" {");
for (t = 0; t < NZC4X4_NODES; ++t) {
printf(" %-3d,", probs[t]);
@@ -1537,11 +1538,9 @@
for (b = 0; b < BLOCK_TYPES; ++b) {
vp9_prob probs[NZC8X8_NODES];
unsigned int branch_ct[NZC8X8_NODES][2];
- vp9_tree_probs_from_distribution(NZC8X8_TOKENS,
- vp9_nzc8x8_encodings,
- vp9_nzc8x8_tree,
+ vp9_tree_probs_from_distribution(vp9_nzc8x8_tree,
probs, branch_ct,
- nzc_stats_8x8[c][r][b]);
+ nzc_stats_8x8[c][r][b], 0);
printf(" {");
for (t = 0; t < NZC8X8_NODES; ++t) {
printf(" %-3d,", probs[t]);
@@ -1566,11 +1565,9 @@
for (b = 0; b < BLOCK_TYPES; ++b) {
vp9_prob probs[NZC16X16_NODES];
unsigned int branch_ct[NZC16X16_NODES][2];
- vp9_tree_probs_from_distribution(NZC16X16_TOKENS,
- vp9_nzc16x16_encodings,
- vp9_nzc16x16_tree,
+ vp9_tree_probs_from_distribution(vp9_nzc16x16_tree,
probs, branch_ct,
- nzc_stats_16x16[c][r][b]);
+ nzc_stats_16x16[c][r][b], 0);
printf(" {");
for (t = 0; t < NZC16X16_NODES; ++t) {
printf(" %-3d,", probs[t]);
@@ -1595,11 +1592,9 @@
for (b = 0; b < BLOCK_TYPES; ++b) {
vp9_prob probs[NZC32X32_NODES];
unsigned int branch_ct[NZC32X32_NODES][2];
- vp9_tree_probs_from_distribution(NZC32X32_TOKENS,
- vp9_nzc32x32_encodings,
- vp9_nzc32x32_tree,
+ vp9_tree_probs_from_distribution(vp9_nzc32x32_tree,
probs, branch_ct,
- nzc_stats_32x32[c][r][b]);
+ nzc_stats_32x32[c][r][b], 0);
printf(" {");
for (t = 0; t < NZC32X32_NODES; ++t) {
printf(" %-3d,", probs[t]);
@@ -1630,6 +1625,14 @@
printf(" },\n");
}
printf("};\n");
+
+ f = fopen("nzcstats.bin", "wb");
+ fwrite(nzc_stats_4x4, sizeof(nzc_stats_4x4), 1, f);
+ fwrite(nzc_stats_8x8, sizeof(nzc_stats_8x8), 1, f);
+ fwrite(nzc_stats_16x16, sizeof(nzc_stats_16x16), 1, f);
+ fwrite(nzc_stats_32x32, sizeof(nzc_stats_32x32), 1, f);
+ fwrite(nzc_pcat_stats, sizeof(nzc_pcat_stats), 1, f);
+ fclose(f);
}
#endif
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index fc95c32..f2a13de 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -198,7 +198,7 @@
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
-// Calculate and activity index for each mb
+// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
VP9_COMMON *const cm = &cpi->common;
int mb_row, mb_col;
@@ -268,6 +268,8 @@
unsigned int mb_activity;
int64_t activity_sum = 0;
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
#if ALT_ACT_MEASURE
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 92bb108..70f9e31 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -1536,7 +1536,7 @@
double this_frame_mv_in_out = 0.0;
double mv_in_out_accumulator = 0.0;
double abs_mv_in_out_accumulator = 0.0;
-
+ double mv_ratio_accumulator_thresh;
int max_bits = frame_max_bits(cpi); // Max for a single frame
unsigned int allow_alt_ref =
@@ -1568,10 +1568,8 @@
if (cpi->common.frame_type == KEY_FRAME)
gf_group_err -= gf_first_frame_err;
- // Scan forward to try and work out how many frames the next gf group
- // should contain and what level of boost is appropriate for the GF
- // or ARF that will be coded with the group
- i = 0;
+ // Motion breakout threshold for loop below depends on image size.
+ mv_ratio_accumulator_thresh = (cpi->common.width + cpi->common.height) / 10.0;
// Work out a maximum interval for the GF.
// If the image appears completely static we can extend beyond this.
@@ -1585,6 +1583,7 @@
if (active_max_gf_interval > cpi->max_gf_interval)
active_max_gf_interval = cpi->max_gf_interval;
+ i = 0;
while (((i < cpi->twopass.static_scene_max_gf_interval) ||
((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
(i < cpi->twopass.frames_to_key)) {
@@ -1644,7 +1643,7 @@
((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
(!flash_detected) &&
- ((mv_ratio_accumulator > 100.0) ||
+ ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
(abs_mv_in_out_accumulator > 3.0) ||
(mv_in_out_accumulator < -2.0) ||
((boost_score - old_boost_score) < IIFACTOR))
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 490a639..656975a 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -156,31 +156,24 @@
// The formulae were derived from computing a 3rd order polynomial best
// fit to the original data (after plotting real maxq vs minq (not q index))
static int calculate_minq_index(double maxq,
- double x3, double x2, double x, double c) {
+ double x3, double x2, double x1, double c) {
int i;
- double minqtarget;
-
- minqtarget = ((x3 * maxq * maxq * maxq) +
- (x2 * maxq * maxq) +
- (x * maxq) +
- c);
-
- if (minqtarget > maxq)
- minqtarget = maxq;
+ const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq + c,
+ maxq);
for (i = 0; i < QINDEX_RANGE; i++) {
if (minqtarget <= vp9_convert_qindex_to_q(i))
return i;
}
+
return QINDEX_RANGE - 1;
}
static void init_minq_luts(void) {
int i;
- double maxq;
for (i = 0; i < QINDEX_RANGE; i++) {
- maxq = vp9_convert_qindex_to_q(i);
+ const double maxq = vp9_convert_qindex_to_q(i);
kf_low_motion_minq[i] = calculate_minq_index(maxq,
@@ -216,7 +209,6 @@
if (mb->e_mbd.allow_high_precision_mv) {
mb->mvcost = mb->nmvcost_hp;
mb->mvsadcost = mb->nmvsadcost_hp;
-
} else {
mb->mvcost = mb->nmvcost;
mb->mvsadcost = mb->nmvsadcost;
@@ -224,15 +216,13 @@
}
static void init_base_skip_probs(void) {
int i;
- double q;
- int t;
for (i = 0; i < QINDEX_RANGE; i++) {
- q = vp9_convert_qindex_to_q(i);
+ const double q = vp9_convert_qindex_to_q(i);
// Exponential decay caluclation of baseline skip prob with clamping
// Based on crude best fit of old table.
- t = (int)(564.25 * pow(2.71828, (-0.012 * q)));
+ const int t = (int)(564.25 * pow(2.71828, (-0.012 * q)));
base_skip_false_prob[i][1] = clip_prob(t);
base_skip_false_prob[i][2] = clip_prob(t * 3 / 4);
@@ -268,7 +258,6 @@
cm->mbskip_pred_probs[k];
}
}
-
}
void vp9_initialize_enc() {
@@ -309,7 +298,6 @@
vpx_memset(xd->last_mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
set_default_lf_deltas(cpi);
-
}
@@ -527,17 +515,13 @@
VP9_COMMON *cm = &cpi->common;
int row, col;
int map_index = 0;
- FILE *statsfile;
+ FILE *statsfile = fopen("segmap.stt", "a");
- statsfile = fopen("segmap.stt", "a");
-
- fprintf(statsfile, "%10d\n",
- cm->current_video_frame);
+ fprintf(statsfile, "%10d\n", cm->current_video_frame);
for (row = 0; row < cpi->common.mb_rows; row++) {
for (col = 0; col < cpi->common.mb_cols; col++) {
- fprintf(statsfile, "%10d",
- cpi->segmentation_map[map_index]);
+ fprintf(statsfile, "%10d", cpi->segmentation_map[map_index]);
map_index++;
}
fprintf(statsfile, "\n");
@@ -1153,10 +1137,7 @@
VP9_COMP *cpi = (VP9_COMP *)(ptr);
VP9_COMMON *const cm = &cpi->common;
- if (!cpi)
- return;
-
- if (!oxcf)
+ if (!cpi || !oxcf)
return;
if (cm->version != oxcf->version) {
@@ -1197,11 +1178,11 @@
cpi->oxcf.lossless = oxcf->lossless;
if (cpi->oxcf.lossless) {
- cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_iwalsh4x4_1;
- cpi->mb.e_mbd.inv_txm4x4 = vp9_short_iwalsh4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_iwalsh4x4_1;
+ cpi->mb.e_mbd.inv_txm4x4 = vp9_short_iwalsh4x4;
} else {
- cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_idct4x4_1;
- cpi->mb.e_mbd.inv_txm4x4 = vp9_short_idct4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_idct4x4_1;
+ cpi->mb.e_mbd.inv_txm4x4 = vp9_short_idct4x4;
}
cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
@@ -1239,31 +1220,28 @@
// Convert target bandwidth from Kbit/s to Bit/s
cpi->oxcf.target_bandwidth *= 1000;
- cpi->oxcf.starting_buffer_level =
- rescale(cpi->oxcf.starting_buffer_level,
- cpi->oxcf.target_bandwidth, 1000);
+ cpi->oxcf.starting_buffer_level = rescale(cpi->oxcf.starting_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
// Set or reset optimal and maximum buffer levels.
if (cpi->oxcf.optimal_buffer_level == 0)
cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
else
- cpi->oxcf.optimal_buffer_level =
- rescale(cpi->oxcf.optimal_buffer_level,
- cpi->oxcf.target_bandwidth, 1000);
+ cpi->oxcf.optimal_buffer_level = rescale(cpi->oxcf.optimal_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
if (cpi->oxcf.maximum_buffer_size == 0)
cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
else
- cpi->oxcf.maximum_buffer_size =
- rescale(cpi->oxcf.maximum_buffer_size,
- cpi->oxcf.target_bandwidth, 1000);
+ cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size,
+ cpi->oxcf.target_bandwidth, 1000);
// Set up frame rate and related parameters rate control values.
vp9_new_frame_rate(cpi, cpi->oxcf.frame_rate);
// Set absolute upper and lower quality limits
- cpi->worst_quality = cpi->oxcf.worst_allowed_q;
- cpi->best_quality = cpi->oxcf.best_allowed_q;
+ cpi->worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->best_quality = cpi->oxcf.best_allowed_q;
// active values should only be modified if out of new range
if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
@@ -1356,30 +1334,30 @@
static void cal_nmvsadcosts(int *mvsadcost[2]) {
int i = 1;
- mvsadcost [0] [0] = 0;
- mvsadcost [1] [0] = 0;
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
} while (++i <= MV_MAX);
}
static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
int i = 1;
- mvsadcost [0] [0] = 0;
- mvsadcost [1] [0] = 0;
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
} while (++i <= MV_MAX);
}
@@ -2920,7 +2898,6 @@
#if CONFIG_POSTPROC
if (cpi->oxcf.noise_sensitivity > 0) {
- uint8_t *src;
int l = 0;
switch (cpi->oxcf.noise_sensitivity) {
@@ -2934,7 +2911,6 @@
l = 60;
break;
case 4:
-
case 5:
l = 100;
break;
@@ -2943,18 +2919,7 @@
break;
}
-
- if (cm->frame_type == KEY_FRAME) {
- vp9_de_noise(cpi->Source, cpi->Source, l, 1, 0);
- } else {
- vp9_de_noise(cpi->Source, cpi->Source, l, 1, 0);
-
- src = cpi->Source->y_buffer;
-
- if (cpi->Source->y_stride < 0) {
- src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
- }
- }
+ vp9_denoise(cpi->Source, cpi->Source, l, 1, 0);
}
#endif
@@ -3189,12 +3154,9 @@
}
// Clamp Q to upper and lower limits:
- if (Q > q_high)
- Q = q_high;
- else if (Q < q_low)
- Q = q_low;
+ Q = clamp(Q, q_low, q_high);
- Loop = ((Q != last_q)) ? TRUE : FALSE;
+ Loop = Q != last_q;
} else
Loop = FALSE;
@@ -4156,16 +4118,17 @@
int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
int i, j;
- int Total = 0;
+ int total = 0;
uint8_t *src = source->y_buffer;
uint8_t *dst = dest->y_buffer;
- // Loop through the Y plane raw and reconstruction data summing (square differences)
+ // Loop through the Y plane raw and reconstruction data summing
+ // (square differences)
for (i = 0; i < source->y_height; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
- Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
&sse);
}
@@ -4173,7 +4136,7 @@
dst += 16 * dest->y_stride;
}
- return Total;
+ return total;
}
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index ad9b960..0083e8a 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -595,7 +595,7 @@
[get_coef_band(scan, tx_size, c)]
[pt][0], 1);
#endif
- pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c + 1, default_eob);
}
#if CONFIG_CODE_NONZEROCOUNT
cost += nzc_cost[nzc];
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 6e2b847..21401d1 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -291,7 +291,7 @@
#endif
token_cache[c] = token;
- pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c + 1, default_eob);
++t;
} while (c < eob && ++c < seg_eob);
#if CONFIG_CODE_NONZEROCOUNT