Merge "Fix comment for target_bandwidth in VP9 and VP10"
diff --git a/test/add_noise_test.cc b/test/add_noise_test.cc
index 96e3afb..d25e4f5 100644
--- a/test/add_noise_test.cc
+++ b/test/add_noise_test.cc
@@ -144,8 +144,7 @@
vpx_free(s);
}
-// TODO(jimbankoski): Make the c work like assembly so we can enable this.
-TEST_P(AddNoiseTest, DISABLED_CheckCvsAssembly) {
+TEST_P(AddNoiseTest, CheckCvsAssembly) {
DECLARE_ALIGNED(16, char, blackclamp[16]);
DECLARE_ALIGNED(16, char, whiteclamp[16]);
DECLARE_ALIGNED(16, char, bothclamp[16]);
@@ -167,8 +166,10 @@
memset(s, 99, image_size);
memset(d, 99, image_size);
+ srand(0);
ASM_REGISTER_STATE_CHECK(GetParam()(s, noise, blackclamp, whiteclamp,
bothclamp, width, height, width));
+ srand(0);
ASM_REGISTER_STATE_CHECK(vpx_plane_add_noise_c(d, noise, blackclamp,
whiteclamp, bothclamp,
width, height, width));
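
The C implementation of vpx_plane_add_noise draws from rand() once per row (visible in the vpx_dsp/postproc.c hunk further down), and the SIMD versions do the same, so the two can only be compared if both start from the same seed; that is what the srand(0) calls before each invocation provide, and why CheckCvsAssembly can now be enabled. A sketch of the reseed-and-compare pattern, outside the patch, with assumed types and a stand-in for the GetParam() function pointer:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef void (*add_noise_fn)(uint8_t *start, char *noise, char *blackclamp,
                                 char *whiteclamp, char *bothclamp,
                                 unsigned int w, unsigned int h, int pitch);

    /* simd_add_noise stands in for the function under test; c_ref is the
     * vpx_plane_add_noise_c reference. Buffers are caller-allocated. */
    static void check_c_vs_simd(add_noise_fn simd_add_noise, add_noise_fn c_ref,
                                uint8_t *s, uint8_t *d, char *noise,
                                char *black, char *white, char *both,
                                unsigned int w, unsigned int h) {
      memset(s, 99, (size_t)w * h);
      memset(d, 99, (size_t)w * h);
      srand(0);  /* same rand() stream for the SIMD run... */
      simd_add_noise(s, noise, black, white, both, w, h, (int)w);
      srand(0);  /* ...and again for the C reference */
      c_ref(d, noise, black, white, both, w, h, (int)w);
      assert(memcmp(s, d, (size_t)w * h) == 0);
    }
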
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 22a2e77..9c71f23 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -118,7 +118,8 @@
// and filter_max_width = 16
//
uint8_t intermediate_buffer[71 * kMaxDimension];
- const int intermediate_next_stride = 1 - intermediate_height * output_width;
+ const int intermediate_next_stride =
+ 1 - static_cast<int>(intermediate_height * output_width);
// Horizontal pass (src -> transposed intermediate).
uint8_t *output_ptr = intermediate_buffer;
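
With unsigned operands, 1 - intermediate_height * output_width is evaluated in unsigned arithmetic and wraps to a huge value before landing in the int; casting the product first keeps the whole expression signed so the negative "rewind" stride is computed directly. An illustration with assumed sizes, not part of the patch:

    #include <stdio.h>

    int main(void) {
      unsigned int intermediate_height = 71, output_width = 64;  /* assumed sizes */
      /* unsigned subtraction wraps: 1 - 4544 becomes 4294962753 with 32-bit unsigned */
      printf("unsigned: %u\n", 1 - intermediate_height * output_width);
      /* casting the product keeps the arithmetic signed: -4543 */
      printf("signed:   %d\n", 1 - (int)(intermediate_height * output_width));
      return 0;
    }
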
diff --git a/vp8/common/arm/neon/bilinearpredict_neon.c b/vp8/common/arm/neon/bilinearpredict_neon.c
index 9824a31..bb6ea76 100644
--- a/vp8/common/arm/neon/bilinearpredict_neon.c
+++ b/vp8/common/arm/neon/bilinearpredict_neon.c
@@ -21,114 +21,6 @@
{ 16, 112}
};
-void vp8_bilinear_predict4x4_neon(
- unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- unsigned char *dst_ptr,
- int dst_pitch) {
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8;
- uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8;
- uint8x16_t q1u8, q2u8;
- uint16x8_t q1u16, q2u16;
- uint16x8_t q7u16, q8u16, q9u16;
- uint64x2_t q4u64, q5u64;
- uint64x1_t d12u64;
- uint32x2x2_t d0u32x2, d1u32x2, d2u32x2, d3u32x2;
-
- if (xoffset == 0) { // skip_1stpass_filter
- uint32x2_t d28u32 = vdup_n_u32(0);
- uint32x2_t d29u32 = vdup_n_u32(0);
- uint32x2_t d30u32 = vdup_n_u32(0);
-
- d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 0);
- src_ptr += src_pixels_per_line;
- d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 1);
- src_ptr += src_pixels_per_line;
- d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 0);
- src_ptr += src_pixels_per_line;
- d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 1);
- src_ptr += src_pixels_per_line;
- d30u32 = vld1_lane_u32((const uint32_t *)src_ptr, d30u32, 0);
- d28u8 = vreinterpret_u8_u32(d28u32);
- d29u8 = vreinterpret_u8_u32(d29u32);
- d30u8 = vreinterpret_u8_u32(d30u32);
- } else {
- d2u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
- d3u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
- d4u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
- d5u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
- d6u8 = vld1_u8(src_ptr);
-
- q1u8 = vcombine_u8(d2u8, d3u8);
- q2u8 = vcombine_u8(d4u8, d5u8);
-
- d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
- d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
- q4u64 = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8);
- q5u64 = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8);
- d12u64 = vshr_n_u64(vreinterpret_u64_u8(d6u8), 8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)),
- vreinterpret_u32_u8(vget_high_u8(q1u8)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)),
- vreinterpret_u32_u8(vget_high_u8(q2u8)));
- d2u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q4u64)),
- vreinterpret_u32_u64(vget_high_u64(q4u64)));
- d3u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),
- vreinterpret_u32_u64(vget_high_u64(q5u64)));
-
- q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
- q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
- q9u16 = vmull_u8(d6u8, d0u8);
-
- q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d2u32x2.val[0]), d1u8);
- q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d3u32x2.val[0]), d1u8);
- q9u16 = vmlal_u8(q9u16, vreinterpret_u8_u64(d12u64), d1u8);
-
- d28u8 = vqrshrn_n_u16(q7u16, 7);
- d29u8 = vqrshrn_n_u16(q8u16, 7);
- d30u8 = vqrshrn_n_u16(q9u16, 7);
- }
-
- // secondpass_filter
- if (yoffset == 0) { // skip_2ndpass_filter
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 1);
- } else {
- d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
- d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
- q1u16 = vmull_u8(d28u8, d0u8);
- q2u16 = vmull_u8(d29u8, d0u8);
-
- d26u8 = vext_u8(d28u8, d29u8, 4);
- d27u8 = vext_u8(d29u8, d30u8, 4);
-
- q1u16 = vmlal_u8(q1u16, d26u8, d1u8);
- q2u16 = vmlal_u8(q2u16, d27u8, d1u8);
-
- d2u8 = vqrshrn_n_u16(q1u16, 7);
- d3u8 = vqrshrn_n_u16(q2u16, 7);
-
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
- }
- return;
-}
-
void vp8_bilinear_predict8x4_neon(
unsigned char *src_ptr,
int src_pixels_per_line,
diff --git a/vp8/common/arm/neon/sixtappredict_neon.c b/vp8/common/arm/neon/sixtappredict_neon.c
index 4c2efc9..49d8d22 100644
--- a/vp8/common/arm/neon/sixtappredict_neon.c
+++ b/vp8/common/arm/neon/sixtappredict_neon.c
@@ -22,383 +22,6 @@
{0, -1, 12, 123, -6, 0, 0, 0},
};
-void vp8_sixtap_predict4x4_neon(
- unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- unsigned char *dst_ptr,
- int dst_pitch) {
- unsigned char *src;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d18u8, d19u8, d20u8, d21u8;
- uint8x8_t d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
- int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
- uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
- uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
- int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
- int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
- uint8x16_t q3u8, q4u8, q5u8, q6u8, q11u8;
- uint64x2_t q3u64, q4u64, q5u64, q6u64, q9u64, q10u64;
- uint32x2x2_t d0u32x2, d1u32x2;
-
- if (xoffset == 0) { // secondpass_filter4x4_only
- uint32x2_t d27u32 = vdup_n_u32(0);
- uint32x2_t d28u32 = vdup_n_u32(0);
- uint32x2_t d29u32 = vdup_n_u32(0);
- uint32x2_t d30u32 = vdup_n_u32(0);
- uint32x2_t d31u32 = vdup_n_u32(0);
-
- // load second_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
- // load src data
- src = src_ptr - src_pixels_per_line * 2;
- d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 0);
- src += src_pixels_per_line;
- d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 1);
- src += src_pixels_per_line;
- d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 0);
- src += src_pixels_per_line;
- d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 1);
- src += src_pixels_per_line;
- d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 0);
- src += src_pixels_per_line;
- d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 1);
- src += src_pixels_per_line;
- d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 0);
- src += src_pixels_per_line;
- d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 1);
- src += src_pixels_per_line;
- d31u32 = vld1_lane_u32((const uint32_t *)src, d31u32, 0);
-
- d27u8 = vreinterpret_u8_u32(d27u32);
- d28u8 = vreinterpret_u8_u32(d28u32);
- d29u8 = vreinterpret_u8_u32(d29u32);
- d30u8 = vreinterpret_u8_u32(d30u32);
- d31u8 = vreinterpret_u8_u32(d31u32);
-
- d23u8 = vext_u8(d27u8, d28u8, 4);
- d24u8 = vext_u8(d28u8, d29u8, 4);
- d25u8 = vext_u8(d29u8, d30u8, 4);
- d26u8 = vext_u8(d30u8, d31u8, 4);
-
- q3u16 = vmull_u8(d27u8, d0u8);
- q4u16 = vmull_u8(d28u8, d0u8);
- q5u16 = vmull_u8(d25u8, d5u8);
- q6u16 = vmull_u8(d26u8, d5u8);
-
- q3u16 = vmlsl_u8(q3u16, d29u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d30u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d23u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d24u8, d1u8);
-
- q3u16 = vmlal_u8(q3u16, d28u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d29u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d24u8, d3u8);
- q6u16 = vmlal_u8(q6u16, d25u8, d3u8);
-
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
-
- q5s16 = vqaddq_s16(q5s16, q3s16);
- q6s16 = vqaddq_s16(q6s16, q4s16);
-
- d3u8 = vqrshrun_n_s16(q5s16, 7);
- d4u8 = vqrshrun_n_s16(q6s16, 7);
-
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1);
- return;
- }
-
- // load first_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
- // First pass: output_height lines x output_width columns (9x4)
-
- if (yoffset == 0) // firstpass_filter4x4_only
- src = src_ptr - 2;
- else
- src = src_ptr - 2 - (src_pixels_per_line * 2);
-
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- src += src_pixels_per_line;
-
- d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
- // vswp here
- q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8));
- q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8));
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8), // d18 d19
- vreinterpret_u32_u8(d19u8));
- d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8), // d20 d21
- vreinterpret_u32_u8(d21u8));
- q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
- q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8);
-
- // keep original src data in q4 q6
- q4u64 = vreinterpretq_u64_u8(q3u8);
- q6u64 = vreinterpretq_u64_u8(q5u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)), // d6 d7
- vreinterpret_u32_u8(vget_high_u8(q3u8)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)), // d10 d11
- vreinterpret_u32_u8(vget_high_u8(q5u8)));
- q9u64 = vshrq_n_u64(q4u64, 8);
- q10u64 = vshrq_n_u64(q6u64, 8);
- q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
- q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19
- vreinterpret_u32_u64(vget_high_u64(q9u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211
- vreinterpret_u32_u64(vget_high_u64(q10u64)));
- q3u64 = vshrq_n_u64(q4u64, 32);
- q5u64 = vshrq_n_u64(q6u64, 32);
- q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8);
- q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7
- vreinterpret_u32_u64(vget_high_u64(q3u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11
- vreinterpret_u32_u64(vget_high_u64(q5u64)));
- q9u64 = vshrq_n_u64(q4u64, 16);
- q10u64 = vshrq_n_u64(q6u64, 16);
- q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8);
- q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19
- vreinterpret_u32_u64(vget_high_u64(q9u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211
- vreinterpret_u32_u64(vget_high_u64(q10u64)));
- q3u64 = vshrq_n_u64(q4u64, 24);
- q5u64 = vshrq_n_u64(q6u64, 24);
- q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8);
- q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7
- vreinterpret_u32_u64(vget_high_u64(q3u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11
- vreinterpret_u32_u64(vget_high_u64(q5u64)));
- q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8);
- q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8);
-
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q9s16);
- q8s16 = vqaddq_s16(q8s16, q10s16);
-
- d27u8 = vqrshrun_n_s16(q7s16, 7);
- d28u8 = vqrshrun_n_s16(q8s16, 7);
-
- if (yoffset == 0) { // firstpass_filter4x4_only
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 1);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1);
- return;
- }
-
- // First Pass on rest 5-line data
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q11u8 = vld1q_u8(src);
-
- d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
- // vswp here
- q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8));
- q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8));
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8), // d18 d19
- vreinterpret_u32_u8(d19u8));
- d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8), // d20 d21
- vreinterpret_u32_u8(d21u8));
- d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 5);
- q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
- q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8);
- q12u16 = vmull_u8(d31u8, d5u8);
-
- q4u64 = vreinterpretq_u64_u8(q3u8);
- q6u64 = vreinterpretq_u64_u8(q5u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)), // d6 d7
- vreinterpret_u32_u8(vget_high_u8(q3u8)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)), // d10 d11
- vreinterpret_u32_u8(vget_high_u8(q5u8)));
- q9u64 = vshrq_n_u64(q4u64, 8);
- q10u64 = vshrq_n_u64(q6u64, 8);
- q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
- q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
- q12u16 = vmlal_u8(q12u16, vget_low_u8(q11u8), d0u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19
- vreinterpret_u32_u64(vget_high_u64(q9u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211
- vreinterpret_u32_u64(vget_high_u64(q10u64)));
- q3u64 = vshrq_n_u64(q4u64, 32);
- q5u64 = vshrq_n_u64(q6u64, 32);
- d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 1);
- q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8);
- q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7
- vreinterpret_u32_u64(vget_high_u64(q3u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11
- vreinterpret_u32_u64(vget_high_u64(q5u64)));
- q9u64 = vshrq_n_u64(q4u64, 16);
- q10u64 = vshrq_n_u64(q6u64, 16);
- d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 4);
- q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8);
- q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19
- vreinterpret_u32_u64(vget_high_u64(q9u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211
- vreinterpret_u32_u64(vget_high_u64(q10u64)));
- q3u64 = vshrq_n_u64(q4u64, 24);
- q5u64 = vshrq_n_u64(q6u64, 24);
- d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 2);
- q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8);
- q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8);
- q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
-
- d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7
- vreinterpret_u32_u64(vget_high_u64(q3u64)));
- d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11
- vreinterpret_u32_u64(vget_high_u64(q5u64)));
- d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 3);
- q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8);
- q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8);
- q11u16 = vmull_u8(d31u8, d3u8);
-
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q11s16 = vreinterpretq_s16_u16(q11u16);
- q12s16 = vreinterpretq_s16_u16(q12u16);
- q7s16 = vqaddq_s16(q7s16, q9s16);
- q8s16 = vqaddq_s16(q8s16, q10s16);
- q12s16 = vqaddq_s16(q12s16, q11s16);
-
- d29u8 = vqrshrun_n_s16(q7s16, 7);
- d30u8 = vqrshrun_n_s16(q8s16, 7);
- d31u8 = vqrshrun_n_s16(q12s16, 7);
-
- // Second pass: 4x4
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
- d23u8 = vext_u8(d27u8, d28u8, 4);
- d24u8 = vext_u8(d28u8, d29u8, 4);
- d25u8 = vext_u8(d29u8, d30u8, 4);
- d26u8 = vext_u8(d30u8, d31u8, 4);
-
- q3u16 = vmull_u8(d27u8, d0u8);
- q4u16 = vmull_u8(d28u8, d0u8);
- q5u16 = vmull_u8(d25u8, d5u8);
- q6u16 = vmull_u8(d26u8, d5u8);
-
- q3u16 = vmlsl_u8(q3u16, d29u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d30u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d23u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d24u8, d1u8);
-
- q3u16 = vmlal_u8(q3u16, d28u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d29u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d24u8, d3u8);
- q6u16 = vmlal_u8(q6u16, d25u8, d3u8);
-
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
-
- q5s16 = vqaddq_s16(q5s16, q3s16);
- q6s16 = vqaddq_s16(q6s16, q4s16);
-
- d3u8 = vqrshrun_n_s16(q5s16, 7);
- d4u8 = vqrshrun_n_s16(q6s16, 7);
-
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0);
- dst_ptr += dst_pitch;
- vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1);
- return;
-}
-
void vp8_sixtap_predict8x4_neon(
unsigned char *src_ptr,
int src_pixels_per_line,
diff --git a/vp8/common/rtcd_defs.pl b/vp8/common/rtcd_defs.pl
index b942d5b..856ede1 100644
--- a/vp8/common/rtcd_defs.pl
+++ b/vp8/common/rtcd_defs.pl
@@ -205,7 +205,6 @@
$vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2;
add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-#TODO(johannkoenig): fix the neon version https://code.google.com/p/webm/issues/detail?id=817
specialize qw/vp8_sixtap_predict4x4 mmx ssse3 media dspr2 msa/;
$vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6;
$vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2;
@@ -223,7 +222,6 @@
$vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6;
add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-#TODO(johannkoenig): fix the neon version https://code.google.com/p/webm/issues/detail?id=892
specialize qw/vp8_bilinear_predict4x4 mmx media msa/;
$vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6;
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 768c764..e20c1ea 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -1591,7 +1591,6 @@
int col_min = ref_col - distance;
int col_max = ref_col + distance;
- // TODO(johannkoenig): check if this alignment is necessary.
DECLARE_ALIGNED(16, unsigned int, sad_array8[8]);
unsigned int sad_array[3];
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 98fc603..d2ecafc 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -244,7 +244,7 @@
&sse8x8[k], &sum8x8[k]);
*sse += sse8x8[k];
*sum += sum8x8[k];
- var8x8[k] = sse8x8[k] - (((unsigned int)sum8x8[k] * sum8x8[k]) >> 6);
+ var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
k++;
}
}
@@ -265,7 +265,7 @@
sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
- var_o[k] = sse_o[k] - (((unsigned int)sum_o[k] * sum_o[k]) >>
+ var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
(b_width_log2_lookup[unit_size] +
b_height_log2_lookup[unit_size] + 6));
k++;
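
The sums here are signed pixel-difference totals, and squaring them in unsigned int arithmetic can overflow 32 bits once the aggregated block gets large (or with high bit depth); widening to int64_t before the shift keeps the full product. A quick demonstration with an assumed worst-case sum for a 64x64 region, outside the patch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      const int sum = 64 * 64 * 255;  /* assumed worst case for a 64x64 block */
      const int shift = 12;           /* log2(64 * 64) */
      uint32_t wrapped = ((unsigned int)sum * sum) >> shift;       /* product wraps mod 2^32 */
      uint32_t widened = (uint32_t)(((int64_t)sum * sum) >> shift);
      printf("wrapped: %" PRIu32 "  widened: %" PRIu32 "\n", wrapped, widened);
      /* prints: wrapped: 4096  widened: 266342400 (32-bit unsigned int) */
      return 0;
    }
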
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index b2d748e..1e6c152 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -956,7 +956,7 @@
// Use the min of the average Q (with some increase) and
// active_worst_quality as basis for active_best.
if (cm->current_video_frame > 1) {
- q = VPXMIN(((35 * rc->avg_frame_qindex[INTER_FRAME]) >> 5),
+ q = VPXMIN(((17 * rc->avg_frame_qindex[INTER_FRAME]) >> 4),
active_worst_quality);
active_best_quality = inter_minq[q];
} else {
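
The scaling factor on the average inter-frame qindex drops from 35/32 (about 1.094) to 17/16 (1.0625), so active_best_quality is looked up from a slightly less inflated q. The arithmetic for an assumed avg_frame_qindex of 100, illustration only:

    #include <stdio.h>

    int main(void) {
      const int avg_qindex = 100;                   /* assumed example value */
      printf("old: %d\n", (35 * avg_qindex) >> 5);  /* 3500 >> 5 == 109 */
      printf("new: %d\n", (17 * avg_qindex) >> 4);  /* 1700 >> 4 == 106 */
      return 0;
    }
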
diff --git a/vpx/vpx_integer.h b/vpx/vpx_integer.h
index e85146c..829c9d1 100644
--- a/vpx/vpx_integer.h
+++ b/vpx/vpx_integer.h
@@ -68,12 +68,7 @@
#if defined(_MSC_VER) && _MSC_VER < 1800
#define PRId64 "I64d"
#else
-#if defined(__APPLE__)
-// When building dynamic frameworks with Swift compatibility, module maps
-// do not allow us to include the system's inttypes.h.
-#else
#include <inttypes.h>
#endif
-#endif
#endif // VPX_VPX_INTEGER_H_
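
Outside the old-MSVC branch, inttypes.h is now included unconditionally, so PRId64 and friends always resolve through the system header (or through the "I64d" fallback above for _MSC_VER < 1800). A usage sketch, not part of the patch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      int64_t v = 1234567890123LL;     /* an arbitrary 64-bit value */
      printf("%" PRId64 "\n", v);      /* PRId64 comes from inttypes.h */
      return 0;
    }
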
diff --git a/vpx_dsp/postproc.c b/vpx_dsp/postproc.c
index c337e6e..682b444 100644
--- a/vpx_dsp/postproc.c
+++ b/vpx_dsp/postproc.c
@@ -23,21 +23,18 @@
unsigned int width, unsigned int height, int pitch) {
unsigned int i, j;
- // TODO(jbb): why does simd code use both but c doesn't, normalize and
- // fix..
- (void) bothclamp;
for (i = 0; i < height; i++) {
uint8_t *pos = start + i * pitch;
char *ref = (char *)(noise + (rand() & 0xff)); // NOLINT
for (j = 0; j < width; j++) {
- if (pos[j] < blackclamp[0])
- pos[j] = blackclamp[0];
+ int v = pos[j];
- if (pos[j] > 255 - whiteclamp[0])
- pos[j] = 255 - whiteclamp[0];
+ v = clamp(v - blackclamp[0], 0, 255);
+ v = clamp(v + bothclamp[0], 0, 255);
+ v = clamp(v - whiteclamp[0], 0, 255);
- pos[j] += ref[j];
+ pos[j] = v + ref[j];
}
}
}
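
The rewritten C loop now folds bothclamp in, matching the SIMD behaviour (which is what lets CheckCvsAssembly above be enabled): the three clamp() steps pin each pixel into [blackclamp[0], 255 - whiteclamp[0]] before the signed noise is added. A small self-contained check of that equivalence with assumed clamp values, outside the patch:

    #include <assert.h>

    static int clamp_int(int v, int lo, int hi) {
      return v < lo ? lo : (v > hi ? hi : v);  /* same shape as libvpx's clamp() */
    }

    int main(void) {
      const int black = 16, white = 16, both = black + white;  /* assumed values */
      int v;
      for (v = 0; v < 256; ++v) {
        int c = clamp_int(v - black, 0, 255);
        c = clamp_int(c + both, 0, 255);
        c = clamp_int(c - white, 0, 255);
        /* the three steps reduce to pinning v into [black, 255 - white] */
        assert(c == clamp_int(v, black, 255 - white));
      }
      return 0;
    }
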
diff --git a/vpx_dsp/sad.c b/vpx_dsp/sad.c
index c0c3ff9..f1f951f 100644
--- a/vpx_dsp/sad.c
+++ b/vpx_dsp/sad.c
@@ -33,47 +33,6 @@
return sad;
}
-// TODO(johannkoenig): this moved to vpx_dsp, should be able to clean this up.
-/* Remove dependency on vp9 variance function by duplicating vp9_comp_avg_pred.
- * The function averages every corresponding element of the buffers and stores
- * the value in a third buffer, comp_pred.
- * pred and comp_pred are assumed to have stride = width
- * In the usage below comp_pred is a local array.
- */
-static INLINE void avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
- int height, const uint8_t *ref, int ref_stride) {
- int i, j;
-
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j++) {
- const int tmp = pred[j] + ref[j];
- comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
- }
- comp_pred += width;
- pred += width;
- ref += ref_stride;
- }
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
- int width, int height, const uint8_t *ref8,
- int ref_stride) {
- int i, j;
- uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j++) {
- const int tmp = pred[j] + ref[j];
- comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
- }
- comp_pred += width;
- pred += width;
- ref += ref_stride;
- }
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
#define sadMxN(m, n) \
unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride) { \
@@ -83,7 +42,7 @@
const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
uint8_t comp_pred[m * n]; \
- avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
+ vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
return sad(src, src_stride, comp_pred, m, m, n); \
}
@@ -221,7 +180,7 @@
int ref_stride, \
const uint8_t *second_pred) { \
uint16_t comp_pred[m * n]; \
- highbd_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
+ vpx_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
return highbd_sadb(src, src_stride, comp_pred, m, m, n); \
}
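
The local avg_pred/highbd_avg_pred copies (the duplicated vp9_comp_avg_pred mentioned in the removed comment) are dropped in favour of the vpx_dsp versions, which apply the same per-pixel rounded average. The averaging rule in isolation, for illustration only:

    /* ROUND_POWER_OF_TWO(x, 1) is (x + 1) >> 1, i.e. the average rounded up. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t avg_px(uint8_t pred, uint8_t ref) {
      return (uint8_t)((pred + ref + 1) >> 1);
    }

    int main(void) {
      assert(avg_px(10, 11) == 11);   /* 21 / 2 rounds up to 11 */
      assert(avg_px(255, 0) == 128);  /* 255 / 2 rounds up to 128 */
      return 0;
    }
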
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index 81ec5db..14d029c 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -255,13 +255,12 @@
int height, \
unsigned int *sse, \
void *unused0, void *unused);
-#define DECLS(opt1, opt2) \
- DECL(8, opt1); \
- DECL(16, opt1)
+#define DECLS(opt) \
+ DECL(8, opt); \
+ DECL(16, opt)
-DECLS(sse2, sse);
-// TODO(johannkoenig): enable the ssse3 or delete
-// DECLS(ssse3, ssse3);
+DECLS(sse2);
+
#undef DECLS
#undef DECL
@@ -398,21 +397,21 @@
return sse - ((cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
-FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
-FN(8, 16, 8, 3, 4, opt1, (int64_t)); \
-FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
-FN(8, 4, 8, 3, 2, opt1, (int64_t));
+#define FNS(opt) \
+FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+FN(8, 4, 8, 3, 2, opt, (int64_t));
-FNS(sse2, sse);
+FNS(sse2);
#undef FNS
#undef FN
diff --git a/vpx_dsp/x86/inv_txfm_sse2.c b/vpx_dsp/x86/inv_txfm_sse2.c
index ae907fd..9c0d3eb 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -1311,24 +1311,10 @@
dc_value = _mm_set1_epi16(a);
- for (i = 0; i < 2; ++i) {
- RECON_AND_STORE(dest + 0 * stride, dc_value);
- RECON_AND_STORE(dest + 1 * stride, dc_value);
- RECON_AND_STORE(dest + 2 * stride, dc_value);
- RECON_AND_STORE(dest + 3 * stride, dc_value);
- RECON_AND_STORE(dest + 4 * stride, dc_value);
- RECON_AND_STORE(dest + 5 * stride, dc_value);
- RECON_AND_STORE(dest + 6 * stride, dc_value);
- RECON_AND_STORE(dest + 7 * stride, dc_value);
- RECON_AND_STORE(dest + 8 * stride, dc_value);
- RECON_AND_STORE(dest + 9 * stride, dc_value);
- RECON_AND_STORE(dest + 10 * stride, dc_value);
- RECON_AND_STORE(dest + 11 * stride, dc_value);
- RECON_AND_STORE(dest + 12 * stride, dc_value);
- RECON_AND_STORE(dest + 13 * stride, dc_value);
- RECON_AND_STORE(dest + 14 * stride, dc_value);
- RECON_AND_STORE(dest + 15 * stride, dc_value);
- dest += 8;
+ for (i = 0; i < 16; ++i) {
+ RECON_AND_STORE(dest + 0, dc_value);
+ RECON_AND_STORE(dest + 8, dc_value);
+ dest += stride;
}
}
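
The DC-only 16x16 path now walks the destination row by row, issuing two 8-pixel RECON_AND_STORE operations per row, instead of two column-half passes of 16 explicit stores; the same pixels are written with far less macro expansion. A plain-C picture of what the loop computes, assuming RECON_AND_STORE adds the DC value to 8 pixels with saturation (sketch only, not the SSE2 macro):

    #include <stdint.h>

    static uint8_t clip_u8(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* Row-major DC add over a 16x16 block, mirroring the rewritten loop. */
    static void dc_add_16x16(uint8_t *dest, int stride, int dc_value) {
      int i, j;
      for (i = 0; i < 16; ++i) {
        for (j = 0; j < 16; ++j)  /* handled as two 8-wide halves in the SSE2 code */
          dest[j] = clip_u8(dest[j] + dc_value);
        dest += stride;
      }
    }
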