Merge "Fix build without dual-filter" into nextgenv2
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index 3de952c..e24c9bf 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -13,6 +13,7 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
+#include "vpx_ports/mem.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/encode_test_driver.h"
@@ -141,38 +142,120 @@
else
passes_ = 1;
}
+
+static bool compare_plane(const uint8_t *const buf1, const int stride1,
+ const uint8_t *const buf2, const int stride2,
+ const int w, const int h,
+ int *const mismatch_row,
+ int *const mismatch_col,
+ int *const mismatch_pix1,
+ int *const mismatch_pix2) {
+ int r, c;
+
+ for (r = 0; r < h; ++r) {
+ for (c = 0; c < w; ++c) {
+ const int pix1 = buf1[r * stride1 + c];
+ const int pix2 = buf2[r * stride2 + c];
+
+ if (pix1 != pix2) {
+ if (mismatch_row != NULL)
+ *mismatch_row = r;
+ if (mismatch_col != NULL)
+ *mismatch_col = c;
+ if (mismatch_pix1 != NULL)
+ *mismatch_pix1 = pix1;
+ if (mismatch_pix2 != NULL)
+ *mismatch_pix2 = pix2;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
// The function should return "true" most of the time, therefore no early
// break-out is implemented within the match checking process.
static bool compare_img(const vpx_image_t *img1,
- const vpx_image_t *img2) {
- bool match = (img1->fmt == img2->fmt) &&
- (img1->cs == img2->cs) &&
- (img1->d_w == img2->d_w) &&
- (img1->d_h == img2->d_h);
+ const vpx_image_t *img2,
+ int *const mismatch_row,
+ int *const mismatch_col,
+ int *const mismatch_plane,
+ int *const mismatch_pix1,
+ int *const mismatch_pix2) {
- const unsigned int width_y = img1->d_w;
- const unsigned int height_y = img1->d_h;
- unsigned int i;
- for (i = 0; i < height_y; ++i)
- match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
- img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
- width_y) == 0) && match;
- const unsigned int width_uv = (img1->d_w + 1) >> 1;
- const unsigned int height_uv = (img1->d_h + 1) >> 1;
- for (i = 0; i < height_uv; ++i)
- match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
- img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
- width_uv) == 0) && match;
- for (i = 0; i < height_uv; ++i)
- match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
- img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
- width_uv) == 0) && match;
- return match;
+ const unsigned int w_y = img1->d_w;
+ const unsigned int h_y = img1->d_h;
+ const unsigned int w_uv = ROUND_POWER_OF_TWO(w_y, img1->x_chroma_shift);
+ const unsigned int h_uv = ROUND_POWER_OF_TWO(h_y, img1->y_chroma_shift);
+
+ if (img1->fmt != img2->fmt
+ || img1->cs != img2->cs
+ || img1->d_w != img2->d_w
+ || img1->d_h != img2->d_h) {
+ if (mismatch_row != NULL)
+ *mismatch_row = -1;
+ if (mismatch_col != NULL)
+ *mismatch_col = -1;
+ return false;
+ }
+
+ if (!compare_plane(img1->planes[VPX_PLANE_Y], img1->stride[VPX_PLANE_Y],
+ img2->planes[VPX_PLANE_Y], img2->stride[VPX_PLANE_Y],
+ w_y, h_y,
+ mismatch_row, mismatch_col,
+ mismatch_pix1, mismatch_pix2)) {
+ if (mismatch_plane != NULL)
+ *mismatch_plane = VPX_PLANE_Y;
+ return false;
+ }
+
+ if (!compare_plane(img1->planes[VPX_PLANE_U], img1->stride[VPX_PLANE_U],
+ img2->planes[VPX_PLANE_U], img2->stride[VPX_PLANE_U],
+ w_uv, h_uv,
+ mismatch_row, mismatch_col,
+ mismatch_pix1, mismatch_pix2)) {
+ if (mismatch_plane != NULL)
+ *mismatch_plane = VPX_PLANE_U;
+ return false;
+ }
+
+ if (!compare_plane(img1->planes[VPX_PLANE_V], img1->stride[VPX_PLANE_V],
+ img2->planes[VPX_PLANE_V], img2->stride[VPX_PLANE_V],
+ w_uv, h_uv,
+ mismatch_row, mismatch_col,
+ mismatch_pix1, mismatch_pix2)) {
+ if (mismatch_plane != NULL)
+ *mismatch_plane = VPX_PLANE_V;
+ return false;
+ }
+
+ return true;
}
-void EncoderTest::MismatchHook(const vpx_image_t* /*img1*/,
- const vpx_image_t* /*img2*/) {
- ASSERT_TRUE(0) << "Encode/Decode mismatch found";
+void EncoderTest::MismatchHook(const vpx_image_t* img_enc,
+ const vpx_image_t* img_dec) {
+ int mismatch_row;
+ int mismatch_col;
+ int mismatch_plane;
+ int mismatch_pix_enc;
+ int mismatch_pix_dec;
+
+ ASSERT_FALSE(compare_img(img_enc, img_dec,
+ &mismatch_row, &mismatch_col,
+ &mismatch_plane,
+ &mismatch_pix_enc,
+ &mismatch_pix_dec));
+
+ GTEST_FAIL()
+ << "Encode/Decode mismatch found:"
+ << std::endl
+ << " pixel value enc/dec: " << mismatch_pix_enc << "/" << mismatch_pix_dec
+ << std::endl
+ << " plane: " << mismatch_plane
+ << std::endl
+ << " row/col: " << mismatch_row << "/" << mismatch_col
+ << std::endl;
}
void EncoderTest::RunLoop(VideoSource *video) {
@@ -265,7 +348,8 @@
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
if (img_enc && img_dec) {
- const bool res = compare_img(img_enc, img_dec);
+ const bool res = compare_img(img_enc, img_dec,
+ NULL, NULL, NULL, NULL, NULL);
if (!res) { // Mismatch
MismatchHook(img_enc, img_dec);
}
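
For reference, a minimal standalone sketch of how the new compare_img() derives the chroma plane dimensions from the luma size and the chroma shifts (the reason vpx_ports/mem.h is now included). The macro body below is assumed to match the ROUND_POWER_OF_TWO definition in vpx_ports/mem.h; the 4:2:0 geometry and the odd frame size are invented example values.

#include <stdio.h>

/* Assumed definition from vpx_ports/mem.h: add half the divisor before
 * shifting, so odd luma dimensions round up. */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  /* Hypothetical 4:2:0 frame with odd dimensions. */
  const unsigned int d_w = 353, d_h = 289;
  const unsigned int x_chroma_shift = 1, y_chroma_shift = 1;
  const unsigned int w_uv = ROUND_POWER_OF_TWO(d_w, x_chroma_shift);
  const unsigned int h_uv = ROUND_POWER_OF_TWO(d_h, y_chroma_shift);
  printf("U/V planes compared as %ux%u\n", w_uv, h_uv);  /* 177x145 */
  return 0;
}
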
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index 777ac49..c4e2721 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -164,7 +164,7 @@
mismatch_psnr_ += mismatch_psnr;
++mismatch_nframes_;
// std::cout << "Mismatch frame psnr: " << mismatch_psnr << "\n";
- ASSERT_TRUE(0) << "Encode/Decode mismatch found";
+ ::libvpx_test::EncoderTest::MismatchHook(img1, img2);
}
void SetErrorFrames(int num, unsigned int *list) {
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index db5d5df..9a049bf 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -97,8 +97,8 @@
cfg_.g_lag_in_frames = 12;
cfg_.rc_end_usage = VPX_VBR;
- libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 704, 144,
- timebase.den, timebase.num, 0, 15);
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 704, 576,
+ timebase.den, timebase.num, 0, 5);
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
const char *md5_fw_str = md5_fw_order_.Get();
@@ -110,14 +110,14 @@
ASSERT_STREQ(md5_fw_str, md5_inv_str);
}
-VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1),
+VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(0, 1),
::testing::Values(0));
#if CONFIG_EXT_TILE
VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(1, 2, 32),
::testing::Values(1, 2, 32));
#else
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 1, 1),
- ::testing::Range(0, 1, 1));
+VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(0, 1),
+ ::testing::Values(0, 1));
#endif // CONFIG_EXT_TILE
} // namespace
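
A short, self-contained illustration (plain googletest, not the libvpx wrappers) of why the instantiations above were switched: ::testing::Range excludes its end point, so Range(0, 1, 1) only ever produced the single value 0, whereas Values(0, 1) runs the test for both 0 and 1; Range(0, 2, 1) and Values(0, 1) are equivalent, the latter just being more explicit.

#include "gtest/gtest.h"

class RangeVsValues : public ::testing::TestWithParam<int> {};

TEST_P(RangeVsValues, ParamIsNonNegative) { EXPECT_GE(GetParam(), 0); }

// Range(begin, end, step) is end-exclusive: this instantiation runs the test
// for 0 and 1, exactly like Values(0, 1) below.
INSTANTIATE_TEST_CASE_P(WithRange, RangeVsValues, ::testing::Range(0, 2, 1));
INSTANTIATE_TEST_CASE_P(WithValues, RangeVsValues, ::testing::Values(0, 1));

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
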
diff --git a/test/vp10_inv_txfm2d_test.cc b/test/vp10_inv_txfm2d_test.cc
index c3552dc..80ac78b 100644
--- a/test/vp10_inv_txfm2d_test.cc
+++ b/test/vp10_inv_txfm2d_test.cc
@@ -84,7 +84,7 @@
}
fwd_txfm_func(input, output, txfm_size, tx_type, bd);
- inv_txfm_func(output, ref_input, txfm_size, inv_txfm_cfg, bd);
+ inv_txfm_func(output, ref_input, txfm_size, tx_type, bd);
for (int ni = 0; ni < sqr_txfm_size; ++ni) {
EXPECT_LE(abs(input[ni] - ref_input[ni]), 4);
diff --git a/test/vp10_txfm_test.h b/test/vp10_txfm_test.h
index 6b0bd0a..c4d03ce 100644
--- a/test/vp10_txfm_test.h
+++ b/test/vp10_txfm_test.h
@@ -104,10 +104,8 @@
typedef void (*TxfmFunc)(const int32_t* in, int32_t* out, const int8_t* cos_bit,
const int8_t* range_bit);
-typedef void (*Fwd_Txfm2d_Func)(const int16_t*, int32_t*, const int,
- int tx_type, const int);
-typedef void (*Inv_Txfm2d_Func)(const int32_t*, uint16_t*, const int,
- const TXFM_2D_CFG*, const int);
+typedef void (*Fwd_Txfm2d_Func)(const int16_t*, int32_t*, int, int, int);
+typedef void (*Inv_Txfm2d_Func)(const int32_t*, uint16_t*, int, int, int);
static const int bd = 10;
static const int input_base = (1 << bd);
diff --git a/vp10/common/idct.c b/vp10/common/idct.c
index b7da81b..ab17cca 100644
--- a/vp10/common/idct.c
+++ b/vp10/common/idct.c
@@ -1302,20 +1302,11 @@
switch (tx_type) {
case DCT_DCT:
- vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_dct_4, bd);
- break;
case ADST_DCT:
- vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_dct_4, bd);
- break;
case DCT_ADST:
- vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_adst_4, bd);
- break;
case ADST_ADST:
vp10_inv_txfm2d_add_4x4(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_adst_4, bd);
+ tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -1350,20 +1341,11 @@
(void)eob;
switch (tx_type) {
case DCT_DCT:
- vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_dct_8, bd);
- break;
case ADST_DCT:
- vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_dct_8, bd);
- break;
case DCT_ADST:
- vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_adst_8, bd);
- break;
case ADST_ADST:
vp10_inv_txfm2d_add_8x8(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_adst_8, bd);
+ tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -1398,20 +1380,11 @@
(void)eob;
switch (tx_type) {
case DCT_DCT:
- vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_dct_16, bd);
- break;
case ADST_DCT:
- vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_dct_16, bd);
- break;
case DCT_ADST:
- vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_adst_16, bd);
- break;
case ADST_ADST:
vp10_inv_txfm2d_add_16x16(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_adst_adst_16, bd);
+ tx_type, bd);
break;
#if CONFIG_EXT_TX
case FLIPADST_DCT:
@@ -1447,7 +1420,7 @@
switch (tx_type) {
case DCT_DCT:
vp10_inv_txfm2d_add_32x32(input, CONVERT_TO_SHORTPTR(dest), stride,
- &inv_txfm_2d_cfg_dct_dct_32, bd);
+ DCT_DCT, bd);
break;
#if CONFIG_EXT_TX
case ADST_DCT:
diff --git a/vp10/common/mvref_common.c b/vp10/common/mvref_common.c
index 17d539f..eed1508 100644
--- a/vp10/common/mvref_common.c
+++ b/vp10/common/mvref_common.c
@@ -173,7 +173,7 @@
mi_pos.row = row_offset;
mi_pos.col = i;
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, &mi_pos)) {
+ if (is_inside(tile, mi_col, mi_row, &mi_pos)) {
const MODE_INFO *const candidate_mi =
xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
@@ -208,7 +208,7 @@
mi_pos.row = i;
mi_pos.col = col_offset;
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, &mi_pos)) {
+ if (is_inside(tile, mi_col, mi_row, &mi_pos)) {
const MODE_INFO *const candidate_mi =
xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
@@ -241,7 +241,7 @@
mi_pos.row = row_offset;
mi_pos.col = col_offset;
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, &mi_pos) &&
+ if (is_inside(tile, mi_col, mi_row, &mi_pos) &&
*refmv_count < MAX_REF_MV_STACK_SIZE) {
const MODE_INFO *const candidate_mi =
xd->mi[mi_pos.row * xd->mi_stride + mi_pos.col];
@@ -387,7 +387,7 @@
mi_pos.row = blk_row;
mi_pos.col = blk_col;
- if (!is_inside(&xd->tile, mi_col, mi_row, cm->mi_rows, &mi_pos))
+ if (!is_inside(&xd->tile, mi_col, mi_row, &mi_pos))
continue;
for (ref = 0; ref < 2; ++ref) {
@@ -565,7 +565,7 @@
// and we also need to keep a mode count.
for (i = 0; i < 2; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ if (is_inside(tile, mi_col, mi_row, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
@@ -587,7 +587,7 @@
// mode counts.
for (; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ if (is_inside(tile, mi_col, mi_row, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
xd->mi_stride]->mbmi;
different_ref_found = 1;
@@ -633,7 +633,7 @@
if (different_ref_found) {
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *mv_ref = &mv_ref_search[i];
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ if (is_inside(tile, mi_col, mi_row, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
* xd->mi_stride]->mbmi;
@@ -678,7 +678,7 @@
#if CONFIG_EXT_INTER
// This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void vp10_update_mv_context(const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
int block, int mi_row, int mi_col,
@@ -697,7 +697,7 @@
// If the size < 8x8, we get the mv from the bmi substructure;
for (i = 0; i < 2; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
- if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ if (is_inside(tile, mi_col, mi_row, mv_ref)) {
const MODE_INFO *const candidate_mi =
xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
@@ -739,7 +739,7 @@
int idx, all_zero = 1;
#endif
#if CONFIG_EXT_INTER
- vp10_update_mv_context(cm, xd, mi, ref_frame, mv_ref_list, -1,
+ vp10_update_mv_context(xd, mi, ref_frame, mv_ref_list, -1,
mi_row, mi_col,
#if CONFIG_REF_MV
compound_mode_context);
diff --git a/vp10/common/mvref_common.h b/vp10/common/mvref_common.h
index a3a3192..b5a0921 100644
--- a/vp10/common/mvref_common.h
+++ b/vp10/common/mvref_common.h
@@ -223,20 +223,12 @@
// Checks that the given mi_row, mi_col and search point
// are inside the borders of the tile.
static INLINE int is_inside(const TileInfo *const tile,
- int mi_col, int mi_row, int mi_rows,
+ int mi_col, int mi_row,
const POSITION *mi_pos) {
-#if CONFIG_EXT_TILE
- (void) mi_rows;
return !(mi_row + mi_pos->row < tile->mi_row_start ||
mi_col + mi_pos->col < tile->mi_col_start ||
mi_row + mi_pos->row >= tile->mi_row_end ||
mi_col + mi_pos->col >= tile->mi_col_end);
-#else
- return !(mi_row + mi_pos->row < 0 ||
- mi_col + mi_pos->col < tile->mi_col_start ||
- mi_row + mi_pos->row >= mi_rows ||
- mi_col + mi_pos->col >= tile->mi_col_end);
-#endif // CONFIG_EXT_TILE
}
static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
@@ -367,7 +359,7 @@
#if CONFIG_EXT_INTER
// This function keeps a mode count for a given MB/SB
-void vp10_update_mv_context(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void vp10_update_mv_context(const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
int block, int mi_row, int mi_col,
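
As a sanity check on the unified bound, a self-contained sketch of the tile-relative test from is_inside(); TileInfo and POSITION here are simplified stand-ins carrying only the fields the check reads, and the tile bounds in main() are invented.

#include <stdio.h>

typedef struct { int mi_row_start, mi_row_end, mi_col_start, mi_col_end; } TileInfo;
typedef struct { int row, col; } POSITION;

static int is_inside(const TileInfo *tile, int mi_col, int mi_row,
                     const POSITION *mi_pos) {
  return !(mi_row + mi_pos->row < tile->mi_row_start ||
           mi_col + mi_pos->col < tile->mi_col_start ||
           mi_row + mi_pos->row >= tile->mi_row_end ||
           mi_col + mi_pos->col >= tile->mi_col_end);
}

int main(void) {
  const TileInfo tile = { 16, 48, 0, 64 };  /* rows [16,48), cols [0,64) */
  const POSITION above = { -1, 0 };
  /* On the first mi row of the tile the above neighbour is rejected even
   * though mi_row - 1 is still inside the frame; one row further down it
   * is accepted. */
  printf("%d %d\n", is_inside(&tile, 8, 16, &above),   /* 0 */
                    is_inside(&tile, 8, 17, &above));  /* 1 */
  return 0;
}
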
diff --git a/vp10/common/onyxc_int.h b/vp10/common/onyxc_int.h
index 7d0b259..455ca2d 100644
--- a/vp10/common/onyxc_int.h
+++ b/vp10/common/onyxc_int.h
@@ -67,10 +67,6 @@
typedef enum {
/**
- * Don't update frame context
- */
- REFRESH_FRAME_CONTEXT_OFF,
- /**
* Update frame context to values resulting from forward probability
* updates signaled in the frame header
*/
@@ -470,11 +466,7 @@
xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8;
// Are edges available for intra prediction?
-#if CONFIG_EXT_TILE
xd->up_available = (mi_row > tile->mi_row_start);
-#else
- xd->up_available = (mi_row != 0);
-#endif // CONFIG_EXT_TILE
xd->left_available = (mi_col > tile->mi_col_start);
if (xd->up_available) {
xd->above_mi = xd->mi[-xd->mi_stride];
diff --git a/vp10/common/reconinter.c b/vp10/common/reconinter.c
index 6b266c5..4c94ceb 100644
--- a/vp10/common/reconinter.c
+++ b/vp10/common/reconinter.c
@@ -1290,11 +1290,7 @@
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int plane, i, mi_step;
-#if CONFIG_EXT_TILE
- int above_available = mi_row > 0 && (mi_row - 1 >= tile->mi_row_start);
-#else
- int above_available = mi_row > 0;
-#endif // CONFIG_EXT_TILE
+ int above_available = mi_row > tile->mi_row_start;
#if CONFIG_VP9_HIGHBITDEPTH
int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -1466,17 +1462,11 @@
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_stride[MAX_MB_PLANE]) {
-#if CONFIG_EXT_TILE
const TileInfo *const tile = &xd->tile;
-#endif // CONFIG_EXT_TILE
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
-#if CONFIG_EXT_TILE
- if (mi_row == 0 || (mi_row - 1) < tile->mi_row_start)
-#else
- if (mi_row == 0)
-#endif // CONFIG_EXT_TILE
+ if (mi_row <= tile->mi_row_start)
return;
for (i = 0; i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
@@ -1980,6 +1970,8 @@
BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]);
const int bwl = b_width_log2_lookup[plane_bsize];
const int bhl = b_height_log2_lookup[plane_bsize];
+ const int pxbw = 4 << bwl;
+ const int pxbh = 4 << bhl;
TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
if (bwl == bhl) {
@@ -1988,8 +1980,8 @@
0, 0, plane);
} else if (bwl < bhl) {
- uint8_t *src_2 = ref + (4 << bwl)*ref_stride;
- uint8_t *dst_2 = dst + (4 << bwl)*dst_stride;
+ uint8_t *src_2 = ref + pxbw * ref_stride;
+ uint8_t *dst_2 = dst + pxbw * dst_stride;
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
ref, ref_stride, dst, dst_stride,
0, 0, plane);
@@ -1998,20 +1990,19 @@
uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
memcpy(src_216 - ref_stride, dst_216 - dst_stride,
- sizeof(*src_216) * (4 << bhl));
+ sizeof(*src_216) * pxbw);
} else
#endif // CONFIG_VP9_HIGHBITDEPTH
{
- memcpy(src_2 - ref_stride, dst_2 - dst_stride,
- sizeof(*src_2) * (4 << bhl));
+ memcpy(src_2 - ref_stride, dst_2 - dst_stride, sizeof(*src_2) * pxbw);
}
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
src_2, ref_stride, dst_2, dst_stride,
0, 1 << bwl, plane);
- } else {
+ } else { // bwl > bhl
int i;
- uint8_t *src_2 = ref + (4 << bhl);
- uint8_t *dst_2 = dst + (4 << bhl);
+ uint8_t *src_2 = ref + pxbh;
+ uint8_t *dst_2 = dst + pxbh;
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
ref, ref_stride, dst, dst_stride,
0, 0, plane);
@@ -2019,12 +2010,12 @@
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
- for (i = 0; i < (4 << bwl); ++i)
+ for (i = 0; i < pxbh; ++i)
src_216[i * ref_stride - 1] = dst_216[i * dst_stride - 1];
} else
#endif // CONFIG_VP9_HIGHBITDEPTH
{
- for (i = 0; i < (4 << bwl); ++i)
+ for (i = 0; i < pxbh; ++i)
src_2[i * ref_stride - 1] = dst_2[i * dst_stride - 1];
}
vp10_predict_intra_block(xd, bwl, bhl, max_tx_size, mode,
diff --git a/vp10/common/vp10_inv_txfm2d.c b/vp10/common/vp10_inv_txfm2d.c
index 5227fc8..3ae54c9 100644
--- a/vp10/common/vp10_inv_txfm2d.c
+++ b/vp10/common/vp10_inv_txfm2d.c
@@ -8,8 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "vp10/common/enums.h"
#include "vp10/common/vp10_txfm.h"
#include "vp10/common/vp10_inv_txfm1d.h"
+#include "vp10/common/vp10_inv_txfm2d_cfg.h"
static INLINE TxfmFunc inv_txfm_type_to_func(TXFM_TYPE txfm_type) {
switch (txfm_type) {
@@ -46,6 +48,105 @@
}
}
+static const TXFM_2D_CFG* vp10_get_inv_txfm_4x4_cfg(int tx_type) {
+ const TXFM_2D_CFG* cfg = NULL;
+ switch (tx_type) {
+ case DCT_DCT:
+ cfg = &inv_txfm_2d_cfg_dct_dct_4;
+ break;
+ case ADST_DCT:
+ cfg = &inv_txfm_2d_cfg_adst_dct_4;
+ break;
+ case DCT_ADST:
+ cfg = &inv_txfm_2d_cfg_dct_adst_4;
+ break;
+ case ADST_ADST:
+ cfg = &inv_txfm_2d_cfg_adst_adst_4;
+ break;
+ default:
+ assert(0);
+ }
+ return cfg;
+}
+
+static const TXFM_2D_CFG* vp10_get_inv_txfm_8x8_cfg(int tx_type) {
+ const TXFM_2D_CFG* cfg = NULL;
+ switch (tx_type) {
+ case DCT_DCT:
+ cfg = &inv_txfm_2d_cfg_dct_dct_8;
+ break;
+ case ADST_DCT:
+ cfg = &inv_txfm_2d_cfg_adst_dct_8;
+ break;
+ case DCT_ADST:
+ cfg = &inv_txfm_2d_cfg_dct_adst_8;
+ break;
+ case ADST_ADST:
+ cfg = &inv_txfm_2d_cfg_adst_adst_8;
+ break;
+ default:
+ assert(0);
+ }
+ return cfg;
+}
+
+static const TXFM_2D_CFG* vp10_get_inv_txfm_16x16_cfg(int tx_type) {
+ const TXFM_2D_CFG* cfg = NULL;
+ switch (tx_type) {
+ case DCT_DCT:
+ cfg = &inv_txfm_2d_cfg_dct_dct_16;
+ break;
+ case ADST_DCT:
+ cfg = &inv_txfm_2d_cfg_adst_dct_16;
+ break;
+ case DCT_ADST:
+ cfg = &inv_txfm_2d_cfg_dct_adst_16;
+ break;
+ case ADST_ADST:
+ cfg = &inv_txfm_2d_cfg_adst_adst_16;
+ break;
+ default:
+ assert(0);
+ }
+ return cfg;
+}
+
+static const TXFM_2D_CFG* vp10_get_inv_txfm_32x32_cfg(int tx_type) {
+ const TXFM_2D_CFG* cfg = NULL;
+ switch (tx_type) {
+ case DCT_DCT:
+ cfg = &inv_txfm_2d_cfg_dct_dct_32;
+ break;
+ case ADST_DCT:
+ cfg = &inv_txfm_2d_cfg_adst_dct_32;
+ break;
+ case DCT_ADST:
+ cfg = &inv_txfm_2d_cfg_dct_adst_32;
+ break;
+ case ADST_ADST:
+ cfg = &inv_txfm_2d_cfg_adst_adst_32;
+ break;
+ default:
+ assert(0);
+ }
+ return cfg;
+}
+
+static const TXFM_2D_CFG* vp10_get_inv_txfm_64x64_cfg(int tx_type) {
+ const TXFM_2D_CFG* cfg = NULL;
+ switch (tx_type) {
+ case DCT_DCT:
+ cfg = &inv_txfm_2d_cfg_dct_dct_64;
+ break;
+ case ADST_DCT:
+ case DCT_ADST:
+ case ADST_ADST:
+ default:
+ assert(0);
+ }
+ return cfg;
+}
+
+
static INLINE void inv_txfm2d_add_c(const int32_t *input, int16_t *output,
int stride, const TXFM_2D_CFG *cfg,
int32_t *txfm_buf) {
@@ -86,61 +187,66 @@
}
void vp10_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
- const int stride, const TXFM_2D_CFG *cfg,
- const int bd) {
+ int stride, int tx_type,
+ int bd) {
int txfm_buf[4 * 4 + 4 + 4];
// output contains the prediction signal which is always positive and smaller
// than (1 << bd) - 1
// since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
// int16_t*
+ const TXFM_2D_CFG* cfg = vp10_get_inv_txfm_4x4_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 4, stride, 0, (1 << bd) - 1);
}
void vp10_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
- const int stride, const TXFM_2D_CFG *cfg,
- const int bd) {
+ int stride, int tx_type,
+ int bd) {
int txfm_buf[8 * 8 + 8 + 8];
// output contains the prediction signal which is always positive and smaller
// than (1 << bd) - 1
// since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
// int16_t*
+ const TXFM_2D_CFG* cfg = vp10_get_inv_txfm_8x8_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 8, stride, 0, (1 << bd) - 1);
}
void vp10_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
- const int stride, const TXFM_2D_CFG *cfg,
- const int bd) {
+ int stride, int tx_type,
+ int bd) {
int txfm_buf[16 * 16 + 16 + 16];
// output contains the prediction signal which is always positive and smaller
// than (1 << bd) - 1
// since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
// int16_t*
+ const TXFM_2D_CFG* cfg = vp10_get_inv_txfm_16x16_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 16, stride, 0, (1 << bd) - 1);
}
void vp10_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
- const int stride, const TXFM_2D_CFG *cfg,
- const int bd) {
+ int stride, int tx_type,
+ int bd) {
int txfm_buf[32 * 32 + 32 + 32];
// output contains the prediction signal which is always positive and smaller
// than (1 << bd) - 1
// since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
// int16_t*
+ const TXFM_2D_CFG* cfg = vp10_get_inv_txfm_32x32_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
}
void vp10_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
- const int stride, const TXFM_2D_CFG *cfg,
- const int bd) {
+ int stride, int tx_type,
+ int bd) {
int txfm_buf[64 * 64 + 64 + 64];
// output contains the prediction signal which is always positive and smaller
// than (1 << bd) - 1
// since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
// int16_t*
+ const TXFM_2D_CFG* cfg = vp10_get_inv_txfm_64x64_cfg(tx_type);
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
}
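
The four switch-based helpers above share the same shape; a table indexed by tx_type is one possible alternative, sketched below with locally defined stand-in types (the *_SKETCH names are not library identifiers, and the enum order DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST is an assumption).

#include <assert.h>
#include <stdio.h>

typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST, TX_TYPES_SKETCH } TX_TYPE_SKETCH;
typedef struct { const char *name; } TXFM_2D_CFG_SKETCH;

static const TXFM_2D_CFG_SKETCH cfg_dct_dct_4   = { "inv_txfm_2d_cfg_dct_dct_4" };
static const TXFM_2D_CFG_SKETCH cfg_adst_dct_4  = { "inv_txfm_2d_cfg_adst_dct_4" };
static const TXFM_2D_CFG_SKETCH cfg_dct_adst_4  = { "inv_txfm_2d_cfg_dct_adst_4" };
static const TXFM_2D_CFG_SKETCH cfg_adst_adst_4 = { "inv_txfm_2d_cfg_adst_adst_4" };

/* One table per transform size replaces one switch per size. */
static const TXFM_2D_CFG_SKETCH *const inv_cfg_4x4[TX_TYPES_SKETCH] = {
  &cfg_dct_dct_4, &cfg_adst_dct_4, &cfg_dct_adst_4, &cfg_adst_adst_4,
};

static const TXFM_2D_CFG_SKETCH *get_inv_txfm_4x4_cfg(int tx_type) {
  assert(tx_type >= DCT_DCT && tx_type < TX_TYPES_SKETCH);
  return inv_cfg_4x4[tx_type];
}

int main(void) {
  printf("%s\n", get_inv_txfm_4x4_cfg(DCT_ADST)->name);
  return 0;
}
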
diff --git a/vp10/common/vp10_rtcd_defs.pl b/vp10/common/vp10_rtcd_defs.pl
index ae0d2cb..d843dfe 100644
--- a/vp10/common/vp10_rtcd_defs.pl
+++ b/vp10/common/vp10_rtcd_defs.pl
@@ -614,27 +614,27 @@
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#fwd txfm
- add_proto qw/void vp10_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, const int stride, int tx_type, const int bd";
+ add_proto qw/void vp10_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_fwd_txfm2d_4x4 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, const int stride, int tx_type, const int bd";
+ add_proto qw/void vp10_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_fwd_txfm2d_8x8 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, const int stride, int tx_type, const int bd";
+ add_proto qw/void vp10_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_fwd_txfm2d_16x16 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, const int stride, int tx_type, const int bd";
+ add_proto qw/void vp10_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_fwd_txfm2d_32x32 sse4_1/;
- add_proto qw/void vp10_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, const int stride, int tx_type, const int bd";
+ add_proto qw/void vp10_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_fwd_txfm2d_64x64 sse4_1/;
#inv txfm
- add_proto qw/void vp10_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd";
+ add_proto qw/void vp10_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_inv_txfm2d_add_4x4/;
- add_proto qw/void vp10_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd";
+ add_proto qw/void vp10_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_inv_txfm2d_add_8x8/;
- add_proto qw/void vp10_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd";
+ add_proto qw/void vp10_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_inv_txfm2d_add_16x16/;
- add_proto qw/void vp10_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd";
+ add_proto qw/void vp10_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_inv_txfm2d_add_32x32/;
- add_proto qw/void vp10_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd";
+ add_proto qw/void vp10_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
specialize qw/vp10_inv_txfm2d_add_64x64/;
}
@@ -662,10 +662,10 @@
add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
specialize qw/vp10_highbd_block_error sse2/;
- add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const int log_scale";
+ add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
specialize qw/vp10_highbd_quantize_fp/;
- add_proto qw/void vp10_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const int log_scale";
+ add_proto qw/void vp10_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, int log_scale";
specialize qw/vp10_highbd_quantize_b/;
# fdct functions
diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c
index 6006e2d..37208bb 100644
--- a/vp10/decoder/decodeframe.c
+++ b/vp10/decoder/decodeframe.c
@@ -1024,11 +1024,7 @@
set_mi_row_col(xd, tile, mi_row_pred, bh, mi_col_pred, bw,
cm->mi_rows, cm->mi_cols);
-#if CONFIG_EXT_TILE
xd->up_available = (mi_row_ori > tile->mi_row_start);
-#else
- xd->up_available = (mi_row_ori != 0);
-#endif // CONFIG_EXT_TILE
xd->left_available = (mi_col_ori > tile->mi_col_start);
set_plane_n4(xd, bw, bh, bwl, bhl);
@@ -3739,14 +3735,9 @@
if (!cm->error_resilient_mode) {
cm->refresh_frame_context =
vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_OFF;
- if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
- cm->refresh_frame_context =
- vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
- }
+ : REFRESH_FRAME_CONTEXT_BACKWARD;
} else {
- cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF;
+ cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
}
// This flag will be overridden by the call to vp10_setup_past_independence
@@ -4274,7 +4265,6 @@
}
// Non frame parallel update frame context here.
- if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF &&
- !context_updated)
+ if (!cm->error_resilient_mode && !context_updated)
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
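
A standalone sketch of the decision the decoder now makes for refresh_frame_context after REFRESH_FRAME_CONTEXT_OFF was dropped; the enum is re-declared locally and the header bit is passed in directly rather than read with vpx_rb_read_bit().

#include <stdio.h>

typedef enum {
  REFRESH_FRAME_CONTEXT_FORWARD,
  REFRESH_FRAME_CONTEXT_BACKWARD
} REFRESH_FRAME_CONTEXT_MODE;

static REFRESH_FRAME_CONTEXT_MODE
read_refresh_frame_context(int error_resilient_mode, int bit) {
  /* In error-resilient mode no bit is read and the saved contexts are never
   * updated (mirroring the !error_resilient_mode guard above). */
  if (error_resilient_mode)
    return REFRESH_FRAME_CONTEXT_FORWARD;
  return bit ? REFRESH_FRAME_CONTEXT_FORWARD : REFRESH_FRAME_CONTEXT_BACKWARD;
}

int main(void) {
  printf("%d %d %d\n",
         read_refresh_frame_context(1, 0),   /* FORWARD, bit ignored */
         read_refresh_frame_context(0, 1),   /* FORWARD */
         read_refresh_frame_context(0, 0));  /* BACKWARD */
  return 0;
}
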
diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c
index e7a1ce3..fd14ef5 100644
--- a/vp10/decoder/decodemv.c
+++ b/vp10/decoder/decodemv.c
@@ -1442,7 +1442,7 @@
#if CONFIG_EXT_INTER
{
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(cm, xd, mi, mbmi->ref_frame[ref],
+ vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref],
mv_ref_list, j, mi_row, mi_col, NULL);
#endif // CONFIG_EXT_INTER
vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 00c637d..a7f8162 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -2889,7 +2889,7 @@
}
}
#endif // CONFIG_EXT_TILE
- return total_size;
+ return (uint32_t)total_size;
}
static void write_render_size(const VP10_COMMON *cm,
@@ -3053,10 +3053,7 @@
if (!cm->error_resilient_mode) {
vpx_wb_write_bit(wb,
- cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF);
- if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
- vpx_wb_write_bit(wb, cm->refresh_frame_context !=
- REFRESH_FRAME_CONTEXT_BACKWARD);
+ cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
}
vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
@@ -3438,7 +3435,7 @@
// Size of compressed header
vpx_wb_write_literal(&wb, 0, 16);
- uncompressed_header_size = vpx_wb_bytes_written(&wb);
+ uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
data += uncompressed_header_size;
vpx_clear_system_state();
diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c
index 3050d1e..a6ff9b6 100644
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -379,11 +379,7 @@
assert(!(mi_col_pred & (mi_width - 1)) && !(mi_row_pred & (mi_height - 1)));
set_mi_row_col(xd, tile, mi_row_pred, mi_height, mi_col_pred, mi_width,
cm->mi_rows, cm->mi_cols);
-#if CONFIG_EXT_TILE
xd->up_available = (mi_row_ori > tile->mi_row_start);
-#else
- xd->up_available = (mi_row_ori != 0);
-#endif // CONFIG_EXT_TILE
xd->left_available = (mi_col_ori > tile->mi_col_start);
// R/D setup.
diff --git a/vp10/encoder/encoder.c b/vp10/encoder/encoder.c
index 8ed09f3..fc55133 100644
--- a/vp10/encoder/encoder.c
+++ b/vp10/encoder/encoder.c
@@ -1986,9 +1986,8 @@
cpi->refresh_last_frame = 1;
cm->refresh_frame_context =
- oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
- oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
+ (oxcf->error_resilient_mode || oxcf->frame_parallel_decoding_mode) ?
+ REFRESH_FRAME_CONTEXT_FORWARD : REFRESH_FRAME_CONTEXT_BACKWARD;
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
cm->allow_screen_content_tools = (cpi->oxcf.content == VP9E_CONTENT_SCREEN);
@@ -4324,7 +4323,7 @@
// By default, encoder assumes decoder can use prev_mi.
if (cm->error_resilient_mode) {
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
- cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF;
+ cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
} else if (cm->intra_only) {
// Only reset the current context.
cm->reset_frame_context = RESET_FRAME_CONTEXT_CURRENT;
@@ -4616,7 +4615,7 @@
cpi->refresh_last_frame ||
cpi->refresh_golden_frame ||
cpi->refresh_alt_ref_frame ||
- cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF ||
+ !cm->error_resilient_mode ||
cm->lf.mode_ref_delta_update ||
cm->seg.update_map ||
cm->seg.update_data;
@@ -4844,9 +4843,8 @@
// Normal defaults
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
cm->refresh_frame_context =
- oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
- oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
+ (oxcf->error_resilient_mode || oxcf->frame_parallel_decoding_mode) ?
+ REFRESH_FRAME_CONTEXT_FORWARD : REFRESH_FRAME_CONTEXT_BACKWARD;
cpi->refresh_last_frame = 1;
cpi->refresh_golden_frame = 0;
@@ -4990,7 +4988,7 @@
Pass0Encode(cpi, size, dest, frame_flags);
}
- if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
+ if (!cm->error_resilient_mode)
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
// No frame encoded, or frame was dropped, release scaled references.
diff --git a/vp10/encoder/quantize.c b/vp10/encoder/quantize.c
index 3919fee..2c61de5 100644
--- a/vp10/encoder/quantize.c
+++ b/vp10/encoder/quantize.c
@@ -186,7 +186,7 @@
const int16_t *dequant_ptr,
uint16_t *eob_ptr,
const int16_t *scan,
- const int16_t *iscan, const int log_scale) {
+ const int16_t *iscan, int log_scale) {
int i;
int eob = -1;
const int scale = 1 << log_scale;
@@ -272,7 +272,7 @@
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
uint16_t *eob_ptr, const int16_t *scan,
- const int16_t *iscan, const int log_scale) {
+ const int16_t *iscan, int log_scale) {
int i, non_zero_count = (int)n_coeffs, eob = -1;
int zbins[2] = {zbin_ptr[0], zbin_ptr[1]};
int round[2] = {round_ptr[0], round_ptr[1]};
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index f9b3f8d..b02e915 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -4986,7 +4986,7 @@
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
#if CONFIG_EXT_INTER
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(cm, xd, mi, frame, mv_ref_list, i,
+ vp10_update_mv_context(xd, mi, frame, mv_ref_list, i,
mi_row, mi_col, NULL);
#endif // CONFIG_EXT_INTER
frame_mv[ZEROMV][frame].as_int = 0;
diff --git a/vp10/encoder/x86/highbd_fwd_txfm_sse4.c b/vp10/encoder/x86/highbd_fwd_txfm_sse4.c
index 23e08fe..ce9089e 100644
--- a/vp10/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/vp10/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -206,8 +206,7 @@
}
void vp10_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
- const int input_stride, int tx_type,
- const int bd) {
+ int input_stride, int tx_type, int bd) {
__m128i in[4];
const TXFM_2D_CFG *cfg = NULL;
@@ -916,7 +915,7 @@
}
void vp10_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff,
- const int stride, int tx_type, const int bd) {
+ int stride, int tx_type, int bd) {
__m128i in[16], out[16];
const TXFM_2D_CFG *cfg = NULL;
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index e6be1dd..cc99d25 100644
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -719,8 +719,8 @@
m += m_stride;
}
sum64 = (sum64 >= 0) ? sum64 : -sum64;
- *sum = ROUND_POWER_OF_TWO(sum64, 6);
- *sse = ROUND_POWER_OF_TWO(sse64, 12);
+ *sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
}
#define MASK_VAR(W, H) \
diff --git a/vpx_dsp/x86/highbd_variance_sse4.c b/vpx_dsp/x86/highbd_variance_sse4.c
index 5c1dfe4..54fc609 100644
--- a/vpx_dsp/x86/highbd_variance_sse4.c
+++ b/vpx_dsp/x86/highbd_variance_sse4.c
@@ -76,7 +76,7 @@
variance4x4_64_sse4_1(a, a_stride, b, b_stride, &local_sse, &sum);
*sse = (uint32_t)local_sse;
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
@@ -91,7 +91,7 @@
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 4);
sum = ROUND_POWER_OF_TWO(sum, 2);
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
@@ -106,7 +106,7 @@
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 8);
sum = ROUND_POWER_OF_TWO(sum, 4);
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
// Sub-pixel
diff --git a/vpx_dsp/x86/masked_variance_intrin_ssse3.c b/vpx_dsp/x86/masked_variance_intrin_ssse3.c
index ca4f6fc..47e2c32 100644
--- a/vpx_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/vpx_dsp/x86/masked_variance_intrin_ssse3.c
@@ -54,9 +54,9 @@
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
+ uint32_t* sse,
+ const int w, const int h) {
int64_t sum64;
uint64_t sse64;
@@ -71,9 +71,9 @@
sse64 = ROUND_POWER_OF_TWO(sse64, 12);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
/*****************************************************************************
@@ -497,9 +497,9 @@
&sum64, &sse64);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
@@ -523,9 +523,9 @@
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
@@ -548,9 +548,9 @@
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
#define HIGHBD_MASKED_VARWXH(W, H) \
@@ -1460,10 +1460,11 @@
*v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
}
-static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
+ __m128i v_sse_q,
+ uint32_t* sse,
+ const int w,
+ const int h) {
int64_t sum64;
uint64_t sse64;
@@ -1482,14 +1483,15 @@
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
-static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
+ __m128i v_sse_q,
+ uint32_t* sse,
+ const int w,
+ const int h) {
int64_t sum64;
uint64_t sse64;
@@ -1508,9 +1510,9 @@
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}