Merge "Extend x32 check by also checking for __x86_64__."
diff --git a/examples/vp8_multi_resolution_encoder.c b/examples/vp8_multi_resolution_encoder.c
index 9f50dc7..e623567 100644
--- a/examples/vp8_multi_resolution_encoder.c
+++ b/examples/vp8_multi_resolution_encoder.c
@@ -8,292 +8,730 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+/*
+ * This is an example demonstrating multi-resolution encoding in VP8.
+ * High-resolution input video is down-sampled to lower-resolutions. The
+ * encoder then encodes the video and outputs multiple bitstreams with
+ * different resolutions.
+ *
+ * This test also allows for setting temporal layers for each spatial layer.
+ * Different number of temporal layers per spatial stream may be used.
+ * Currently up to 3 temporal layers per spatial stream (encoder) are supported
+ * in this test.
+ */
-// This is an example demonstrating multi-resolution encoding in VP8.
-// High-resolution input video is down-sampled to lower-resolutions. The
-// encoder then encodes the video and outputs multiple bitstreams with
-// different resolutions.
-//
-// Configure with --enable-multi-res-encoding flag to enable this example.
+#include "./vpx_config.h"
#include <stdio.h>
#include <stdlib.h>
+#include <stdarg.h>
#include <string.h>
+#include <math.h>
+#include <assert.h>
+#include <sys/time.h>
+#if USE_POSIX_MMAP
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+#include "vpx_ports/vpx_timer.h"
+#define VPX_CODEC_DISABLE_COMPAT 1
+#include "vpx/vpx_encoder.h"
+#include "vpx/vp8cx.h"
+#include "vpx_ports/mem_ops.h"
+#include "./tools_common.h"
+#define interface (vpx_codec_vp8_cx())
+#define fourcc 0x30385056
+void usage_exit() {
+ exit(EXIT_FAILURE);
+}
+
+/*
+ * The input video frame is downsampled several times to generate a multi-level
+ * hierarchical structure. NUM_ENCODERS is defined as the number of encoding
+ * levels required. For example, if the size of input video is 1280x720,
+ * NUM_ENCODERS is 3, and down-sampling factor is 2, the encoder outputs 3
+ * bitstreams with resolution of 1280x720(level 0), 640x360(level 1), and
+ * 320x180(level 2) respectively.
+ */
+
+/* Number of encoders (spatial resolutions) used in this test. */
+#define NUM_ENCODERS 3
+
+/* Maximum number of temporal layers allowed for this test. */
+#define MAX_NUM_TEMPORAL_LAYERS 3
+
+/* This example uses the scaler function in libyuv. */
#include "third_party/libyuv/include/libyuv/basic_types.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/libyuv/include/libyuv/cpu_id.h"
-#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
+int (*read_frame_p)(FILE *f, vpx_image_t *img);
-#include "./tools_common.h"
-#include "./video_writer.h"
+static int read_frame(FILE *f, vpx_image_t *img) {
+ size_t nbytes, to_read;
+ int res = 1;
-// The input video frame is downsampled several times to generate a
-// multi-level hierarchical structure. kNumEncoders is defined as the number
-// of encoding levels required. For example, if the size of input video is
-// 1280x720, kNumEncoders is 3, and down-sampling factor is 2, the encoder
-// outputs 3 bitstreams with resolution of 1280x720(level 0),
-// 640x360(level 1), and 320x180(level 2) respectively.
-#define kNumEncoders 3
-
-static const char *exec_name;
-
-void usage_exit() {
- fprintf(stderr,
- "Usage: %s <width> <height> <infile> <outfile(s)> <output psnr?>\n",
- exec_name);
- exit(EXIT_FAILURE);
+ to_read = img->w*img->h*3/2;
+ nbytes = fread(img->planes[0], 1, to_read, f);
+ if(nbytes != to_read) {
+ res = 0;
+ if(nbytes > 0)
+ printf("Warning: Read partial frame. Check your width & height!\n");
+ }
+ return res;
}
-int main(int argc, char *argv[]) {
- int frame_cnt = 0;
- FILE *infile = NULL;
- VpxVideoWriter *writers[kNumEncoders];
- vpx_codec_ctx_t codec[kNumEncoders];
- vpx_codec_enc_cfg_t cfg[kNumEncoders];
- vpx_image_t raw[kNumEncoders];
- const VpxInterface *const encoder = get_vpx_encoder_by_name("vp8");
- // Currently, only realtime mode is supported in multi-resolution encoding.
- const int arg_deadline = VPX_DL_REALTIME;
- int i;
- int width = 0;
- int height = 0;
- int frame_avail = 0;
- int got_data = 0;
+static int read_frame_by_row(FILE *f, vpx_image_t *img) {
+ size_t nbytes, to_read;
+ int res = 1;
+ int plane;
- // Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you
- // don't need to know PSNR, which will skip PSNR calculation and save
- // encoding time.
- int show_psnr = 0;
- uint64_t psnr_sse_total[kNumEncoders] = {0};
- uint64_t psnr_samples_total[kNumEncoders] = {0};
- double psnr_totals[kNumEncoders][4] = {{0, 0}};
- int psnr_count[kNumEncoders] = {0};
-
- // Set the required target bitrates for each resolution level.
- // If target bitrate for highest-resolution level is set to 0,
- // (i.e. target_bitrate[0]=0), we skip encoding at that level.
- unsigned int target_bitrate[kNumEncoders] = {1000, 500, 100};
-
- // Enter the frame rate of the input video.
- const int framerate = 30;
- // Set down-sampling factor for each resolution level.
- // dsf[0] controls down sampling from level 0 to level 1;
- // dsf[1] controls down sampling from level 1 to level 2;
- // dsf[2] is not used.
- vpx_rational_t dsf[kNumEncoders] = {{2, 1}, {2, 1}, {1, 1}};
-
- exec_name = argv[0];
-
- if (!encoder)
- die("Unsupported codec.");
-
- // exe_name, input width, input height, input file,
- // output file 1, output file 2, output file 3, psnr on/off
- if (argc != (5 + kNumEncoders))
- die("Invalid number of input options.");
-
- printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
-
- width = strtol(argv[1], NULL, 0);
- height = strtol(argv[2], NULL, 0);
-
- if (width < 16 || width % 2 || height < 16 || height % 2)
- die("Invalid resolution: %ldx%ld", width, height);
-
- // Open input video file for encoding
- if (!(infile = fopen(argv[3], "rb")))
- die("Failed to open %s for reading", argv[3]);
-
- show_psnr = strtol(argv[kNumEncoders + 4], NULL, 0);
-
- // Populate default encoder configuration
- for (i = 0; i < kNumEncoders; ++i) {
- vpx_codec_err_t res =
- vpx_codec_enc_config_default(encoder->codec_interface(), &cfg[i], 0);
- if (res != VPX_CODEC_OK) {
- printf("Failed to get config: %s\n", vpx_codec_err_to_string(res));
- return EXIT_FAILURE;
- }
- }
-
- // Update the default configuration according to needs of the application.
- // Highest-resolution encoder settings
- cfg[0].g_w = width;
- cfg[0].g_h = height;
- cfg[0].g_threads = 1;
- cfg[0].rc_dropframe_thresh = 30;
- cfg[0].rc_end_usage = VPX_CBR;
- cfg[0].rc_resize_allowed = 0;
- cfg[0].rc_min_quantizer = 4;
- cfg[0].rc_max_quantizer = 56;
- cfg[0].rc_undershoot_pct = 98;
- cfg[0].rc_overshoot_pct = 100;
- cfg[0].rc_buf_initial_sz = 500;
- cfg[0].rc_buf_optimal_sz = 600;
- cfg[0].rc_buf_sz = 1000;
- cfg[0].g_error_resilient = 1;
- cfg[0].g_lag_in_frames = 0;
- cfg[0].kf_mode = VPX_KF_AUTO; // VPX_KF_DISABLED
- cfg[0].kf_min_dist = 3000;
- cfg[0].kf_max_dist = 3000;
- cfg[0].rc_target_bitrate = target_bitrate[0];
- cfg[0].g_timebase.num = 1;
- cfg[0].g_timebase.den = framerate;
-
- // Other-resolution encoder settings
- for (i = 1; i < kNumEncoders; ++i) {
- cfg[i] = cfg[0];
- cfg[i].g_threads = 1;
- cfg[i].rc_target_bitrate = target_bitrate[i];
-
- // Note: Width & height of other-resolution encoders are calculated
- // from the highest-resolution encoder's size and the corresponding
- // down_sampling_factor.
+ for (plane = 0; plane < 3; plane++)
{
- unsigned int iw = cfg[i - 1].g_w * dsf[i - 1].den + dsf[i - 1].num - 1;
- unsigned int ih = cfg[i - 1].g_h * dsf[i - 1].den + dsf[i - 1].num - 1;
- cfg[i].g_w = iw / dsf[i - 1].num;
- cfg[i].g_h = ih / dsf[i - 1].num;
- }
+ unsigned char *ptr;
+ int w = (plane ? (1 + img->d_w) / 2 : img->d_w);
+ int h = (plane ? (1 + img->d_h) / 2 : img->d_h);
+ int r;
- // Make width & height to be multiplier of 2.
- if ((cfg[i].g_w) % 2)
- cfg[i].g_w++;
-
- if ((cfg[i].g_h) % 2)
- cfg[i].g_h++;
- }
-
- // Open output file for each encoder to output bitstreams
- for (i = 0; i < kNumEncoders; ++i) {
- VpxVideoInfo info = {
- encoder->fourcc,
- cfg[i].g_w,
- cfg[i].g_h,
- {cfg[i].g_timebase.num, cfg[i].g_timebase.den}
- };
-
- if (!(writers[i] = vpx_video_writer_open(argv[i+4], kContainerIVF, &info)))
- die("Failed to open %s for writing", argv[i+4]);
- }
-
- // Allocate image for each encoder
- for (i = 0; i < kNumEncoders; ++i)
- if (!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
- die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h);
-
- // Initialize multi-encoder
- if (vpx_codec_enc_init_multi(&codec[0], encoder->codec_interface(), &cfg[0],
- kNumEncoders,
- show_psnr ? VPX_CODEC_USE_PSNR : 0, &dsf[0]))
- die_codec(&codec[0], "Failed to initialize encoder");
-
- // The extra encoding configuration parameters can be set as follows.
- for (i = 0; i < kNumEncoders; i++) {
- // Set encoding speed
- if (vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, -6))
- die_codec(&codec[i], "Failed to set cpu_used");
-
- // Set static threshold.
- if (vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
- die_codec(&codec[i], "Failed to set static threshold");
-
- // Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING
- // Enable denoising for the highest-resolution encoder.
- if (vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, i == 0))
- die_codec(&codec[0], "Failed to set noise_sensitivity");
- }
-
- frame_avail = 1;
- got_data = 0;
-
- while (frame_avail || got_data) {
- vpx_codec_iter_t iter[kNumEncoders] = {NULL};
- const vpx_codec_cx_pkt_t *pkt[kNumEncoders];
-
- frame_avail = vpx_img_read(&raw[0], infile);
-
- if (frame_avail) {
- for (i = 1; i < kNumEncoders; ++i) {
- vpx_image_t *const prev = &raw[i - 1];
-
- // Scale the image down a number of times by downsampling factor
- // FilterMode 1 or 2 give better psnr than FilterMode 0.
- I420Scale(prev->planes[VPX_PLANE_Y], prev->stride[VPX_PLANE_Y],
- prev->planes[VPX_PLANE_U], prev->stride[VPX_PLANE_U],
- prev->planes[VPX_PLANE_V], prev->stride[VPX_PLANE_V],
- prev->d_w, prev->d_h,
- raw[i].planes[VPX_PLANE_Y], raw[i].stride[VPX_PLANE_Y],
- raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
- raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
- raw[i].d_w, raw[i].d_h, 1);
- }
- }
-
- // Encode frame.
- if (vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
- frame_cnt, 1, 0, arg_deadline)) {
- die_codec(&codec[0], "Failed to encode frame");
- }
-
- for (i = kNumEncoders - 1; i >= 0; i--) {
- got_data = 0;
-
- while ((pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i]))) {
- got_data = 1;
- switch (pkt[i]->kind) {
- case VPX_CODEC_CX_FRAME_PKT:
- vpx_video_writer_write_frame(writers[i], pkt[i]->data.frame.buf,
- pkt[i]->data.frame.sz, frame_cnt - 1);
- break;
- case VPX_CODEC_PSNR_PKT:
- if (show_psnr) {
- int j;
- psnr_sse_total[i] += pkt[i]->data.psnr.sse[0];
- psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
- for (j = 0; j < 4; j++)
- psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
- psnr_count[i]++;
- }
+ /* Determine the correct plane based on the image format. The for-loop
+ * always counts in Y,U,V order, but this may not match the order of
+ * the data on disk.
+ */
+ switch (plane)
+ {
+ case 1:
+ ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12? VPX_PLANE_V : VPX_PLANE_U];
break;
- default:
+ case 2:
+ ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12?VPX_PLANE_U : VPX_PLANE_V];
break;
+ default:
+ ptr = img->planes[plane];
}
- printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT &&
- (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":".");
- fflush(stdout);
- }
- }
- frame_cnt++;
- }
- printf("\n");
- fclose(infile);
+ for (r = 0; r < h; r++)
+ {
+ to_read = w;
- printf("Processed %d frames.\n", frame_cnt - 1);
- for (i = 0; i < kNumEncoders; ++i) {
- // Calculate PSNR and print it out
- if (show_psnr && psnr_count[i] > 0) {
- int j;
- double ovpsnr = sse_to_psnr(psnr_samples_total[i], 255.0,
- psnr_sse_total[i]);
+ nbytes = fread(ptr, 1, to_read, f);
+ if(nbytes != to_read) {
+ res = 0;
+ if(nbytes > 0)
+ printf("Warning: Read partial frame. Check your width & height!\n");
+ break;
+ }
- fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i);
- fprintf(stderr, " %.3lf", ovpsnr);
- for (j = 0; j < 4; j++)
- fprintf(stderr, " %.3lf", psnr_totals[i][j]/psnr_count[i]);
+ ptr += img->stride[plane];
+ }
+ if (!res)
+ break;
}
- if (vpx_codec_destroy(&codec[i]))
- die_codec(&codec[i], "Failed to destroy codec");
+ return res;
+}
- vpx_img_free(&raw[i]);
- vpx_video_writer_close(writers[i]);
- }
- printf("\n");
+static void write_ivf_file_header(FILE *outfile,
+ const vpx_codec_enc_cfg_t *cfg,
+ int frame_cnt) {
+ char header[32];
- return EXIT_SUCCESS;
+ if(cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS)
+ return;
+ header[0] = 'D';
+ header[1] = 'K';
+ header[2] = 'I';
+ header[3] = 'F';
+ mem_put_le16(header+4, 0); /* version */
+ mem_put_le16(header+6, 32); /* headersize */
+ mem_put_le32(header+8, fourcc); /* fourcc */
+ mem_put_le16(header+12, cfg->g_w); /* width */
+ mem_put_le16(header+14, cfg->g_h); /* height */
+ mem_put_le32(header+16, cfg->g_timebase.den); /* rate */
+ mem_put_le32(header+20, cfg->g_timebase.num); /* scale */
+ mem_put_le32(header+24, frame_cnt); /* length */
+ mem_put_le32(header+28, 0); /* unused */
+
+ (void) fwrite(header, 1, 32, outfile);
+}
+
+static void write_ivf_frame_header(FILE *outfile,
+ const vpx_codec_cx_pkt_t *pkt)
+{
+ char header[12];
+ vpx_codec_pts_t pts;
+
+ if(pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ return;
+
+ pts = pkt->data.frame.pts;
+ mem_put_le32(header, pkt->data.frame.sz);
+ mem_put_le32(header+4, pts&0xFFFFFFFF);
+ mem_put_le32(header+8, pts >> 32);
+
+ (void) fwrite(header, 1, 12, outfile);
+}
+
+/* Temporal scaling parameters */
+/* This sets all the temporal layer parameters given |num_temporal_layers|,
+ * including the target bit allocation across temporal layers. Bit allocation
+ * parameters will be passed in as user parameters in another version.
+ */
+static void set_temporal_layer_pattern(int num_temporal_layers,
+ vpx_codec_enc_cfg_t *cfg,
+ int bitrate,
+ int *layer_flags)
+{
+ assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS);
+ switch (num_temporal_layers)
+ {
+ case 1:
+ {
+ /* 1-layer */
+ cfg->ts_number_layers = 1;
+ cfg->ts_periodicity = 1;
+ cfg->ts_rate_decimator[0] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_target_bitrate[0] = bitrate;
+
+ // Update L only.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+
+ case 2:
+ {
+ /* 2-layers, with sync point at first frame of layer 1. */
+ cfg->ts_number_layers = 2;
+ cfg->ts_periodicity = 2;
+ cfg->ts_rate_decimator[0] = 2;
+ cfg->ts_rate_decimator[1] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 1;
+ // Use 60/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[1] = bitrate;
+
+ /* 0=L, 1=GF */
+ // ARF is used as predictor for all frames, and is only updated on
+ // key frame. Sync point every 8 frames.
+
+ // Layer 0: predict from L and ARF, update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: sync point: predict from L and ARF, and update G.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 0, predict from L and ARF, update L.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+
+ // Layer 1: predict from L, G and ARF, and update G.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0
+ layer_flags[4] = layer_flags[2];
+
+ // Layer 1
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 0
+ layer_flags[6] = layer_flags[4];
+
+ // Layer 1
+ layer_flags[7] = layer_flags[5];
+ break;
+ }
+
+ case 3:
+ default:
+ {
+ // 3-layers structure where ARF is used as predictor for all frames,
+ // and is only updated on key frame.
+ // Sync points for layer 1 and 2 every 8 frames.
+ cfg->ts_number_layers = 3;
+ cfg->ts_periodicity = 4;
+ cfg->ts_rate_decimator[0] = 4;
+ cfg->ts_rate_decimator[1] = 2;
+ cfg->ts_rate_decimator[2] = 1;
+ cfg->ts_layer_id[0] = 0;
+ cfg->ts_layer_id[1] = 2;
+ cfg->ts_layer_id[2] = 1;
+ cfg->ts_layer_id[3] = 2;
+ // Use 40/20/40 bit allocation as example.
+ cfg->ts_target_bitrate[0] = 0.4f * bitrate;
+ cfg->ts_target_bitrate[1] = 0.6f * bitrate;
+ cfg->ts_target_bitrate[2] = bitrate;
+
+ /* 0=L, 1=GF, 2=ARF */
+
+ // Layer 0: predict from L and ARF; update L and G.
+ layer_flags[0] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: sync point: predict from L and ARF; update none.
+ layer_flags[1] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 1: sync point: predict from L and ARF; update G.
+ layer_flags[2] = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[3] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+
+ // Layer 0: predict from L and ARF; update L.
+ layer_flags[4] = VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[5] = layer_flags[3];
+
+ // Layer 1: predict from L, G, ARF; update G.
+ layer_flags[6] = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+
+ // Layer 2: predict from L, G, ARF; update none.
+ layer_flags[7] = layer_flags[3];
+ break;
+ }
+ }
+}
+
+/* The periodicity of the pattern given the number of temporal layers. */
+static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8};
+
+int main(int argc, char **argv)
+{
+ FILE *infile, *outfile[NUM_ENCODERS];
+ FILE *downsampled_input[NUM_ENCODERS - 1];
+ char filename[50];
+ vpx_codec_ctx_t codec[NUM_ENCODERS];
+ vpx_codec_enc_cfg_t cfg[NUM_ENCODERS];
+ int frame_cnt = 0;
+ vpx_image_t raw[NUM_ENCODERS];
+ vpx_codec_err_t res[NUM_ENCODERS];
+
+ int i;
+ long width;
+ long height;
+ int length_frame;
+ int frame_avail;
+ int got_data;
+ int flags = 0;
+ int layer_id = 0;
+
+ int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS]
+ = {0};
+ int flag_periodicity;
+
+ /*Currently, only realtime mode is supported in multi-resolution encoding.*/
+ int arg_deadline = VPX_DL_REALTIME;
+
+ /* Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you
+ don't need to know PSNR, which will skip PSNR calculation and save
+ encoding time. */
+ int show_psnr = 0;
+ int key_frame_insert = 0;
+ uint64_t psnr_sse_total[NUM_ENCODERS] = {0};
+ uint64_t psnr_samples_total[NUM_ENCODERS] = {0};
+ double psnr_totals[NUM_ENCODERS][4] = {{0,0}};
+ int psnr_count[NUM_ENCODERS] = {0};
+
+ double cx_time = 0;
+ struct timeval tv1, tv2, difftv;
+
+ /* Set the required target bitrates for each resolution level.
+ * If target bitrate for highest-resolution level is set to 0,
+ * (i.e. target_bitrate[0]=0), we skip encoding at that level.
+ */
+ unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
+
+ /* Enter the frame rate of the input video */
+ int framerate = 30;
+
+ /* Set down-sampling factor for each resolution level.
+ dsf[0] controls down sampling from level 0 to level 1;
+ dsf[1] controls down sampling from level 1 to level 2;
+ dsf[2] is not used. */
+ vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}};
+
+ /* Set the number of temporal layers for each encoder/resolution level,
+ * starting from highest resoln down to lowest resoln. */
+ unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3};
+
+ if(argc!= (7 + 3 * NUM_ENCODERS))
+ die("Usage: %s <width> <height> <frame_rate> <infile> <outfile(s)> "
+ "<rate_encoder(s)> <temporal_layer(s)> <key_frame_insert> <output psnr?> \n",
+ argv[0]);
+
+ printf("Using %s\n",vpx_codec_iface_name(interface));
+
+ width = strtol(argv[1], NULL, 0);
+ height = strtol(argv[2], NULL, 0);
+ framerate = strtol(argv[3], NULL, 0);
+
+ if(width < 16 || width%2 || height <16 || height%2)
+ die("Invalid resolution: %ldx%ld", width, height);
+
+ /* Open input video file for encoding */
+ if(!(infile = fopen(argv[4], "rb")))
+ die("Failed to open %s for reading", argv[4]);
+
+ /* Open output file for each encoder to output bitstreams */
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ if(!target_bitrate[i])
+ {
+ outfile[i] = NULL;
+ continue;
+ }
+
+ if(!(outfile[i] = fopen(argv[i+5], "wb")))
+ die("Failed to open %s for writing", argv[i+4]);
+ }
+
+ // Bitrates per spatial layer: overwrite default rates above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0);
+ }
+
+ // Temporal layers per spatial layers: overwrite default settings above.
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0);
+ if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3)
+ die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n",
+ num_temporal_layers);
+ }
+
+ /* Open file to write out each spatially downsampled input stream. */
+ for (i=0; i< NUM_ENCODERS - 1; i++)
+ {
+ // Highest resoln is encoder 0.
+ if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0)
+ {
+ return EXIT_FAILURE;
+ }
+ downsampled_input[i] = fopen(filename,"wb");
+ }
+
+ key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0);
+
+ show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0);
+
+
+ /* Populate default encoder configuration */
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ res[i] = vpx_codec_enc_config_default(interface, &cfg[i], 0);
+ if(res[i]) {
+ printf("Failed to get config: %s\n", vpx_codec_err_to_string(res[i]));
+ return EXIT_FAILURE;
+ }
+ }
+
+ /*
+ * Update the default configuration according to needs of the application.
+ */
+ /* Highest-resolution encoder settings */
+ cfg[0].g_w = width;
+ cfg[0].g_h = height;
+ cfg[0].rc_dropframe_thresh = 0;
+ cfg[0].rc_end_usage = VPX_CBR;
+ cfg[0].rc_resize_allowed = 0;
+ cfg[0].rc_min_quantizer = 2;
+ cfg[0].rc_max_quantizer = 56;
+ cfg[0].rc_undershoot_pct = 100;
+ cfg[0].rc_overshoot_pct = 15;
+ cfg[0].rc_buf_initial_sz = 500;
+ cfg[0].rc_buf_optimal_sz = 600;
+ cfg[0].rc_buf_sz = 1000;
+ cfg[0].g_error_resilient = 1; /* Enable error resilient mode */
+ cfg[0].g_lag_in_frames = 0;
+
+ /* Disable automatic keyframe placement */
+ /* Note: These 3 settings are copied to all levels. But, except the lowest
+ * resolution level, all other levels are set to VPX_KF_DISABLED internally.
+ */
+ cfg[0].kf_mode = VPX_KF_AUTO;
+ cfg[0].kf_min_dist = 3000;
+ cfg[0].kf_max_dist = 3000;
+
+ cfg[0].rc_target_bitrate = target_bitrate[0]; /* Set target bitrate */
+ cfg[0].g_timebase.num = 1; /* Set fps */
+ cfg[0].g_timebase.den = framerate;
+
+ /* Other-resolution encoder settings */
+ for (i=1; i< NUM_ENCODERS; i++)
+ {
+ memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t));
+
+ cfg[i].rc_target_bitrate = target_bitrate[i];
+
+ /* Note: Width & height of other-resolution encoders are calculated
+ * from the highest-resolution encoder's size and the corresponding
+ * down_sampling_factor.
+ */
+ {
+ unsigned int iw = cfg[i-1].g_w*dsf[i-1].den + dsf[i-1].num - 1;
+ unsigned int ih = cfg[i-1].g_h*dsf[i-1].den + dsf[i-1].num - 1;
+ cfg[i].g_w = iw/dsf[i-1].num;
+ cfg[i].g_h = ih/dsf[i-1].num;
+ }
+
+ /* Make width & height to be multiplier of 2. */
+ // Should support odd size ???
+ if((cfg[i].g_w)%2)cfg[i].g_w++;
+ if((cfg[i].g_h)%2)cfg[i].g_h++;
+ }
+
+
+ // Set the number of threads per encode/spatial layer.
+ // (1, 1, 1) means no encoder threading.
+ cfg[0].g_threads = 2;
+ cfg[1].g_threads = 1;
+ cfg[2].g_threads = 1;
+
+ /* Allocate image for each encoder */
+ for (i=0; i< NUM_ENCODERS; i++)
+ if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32))
+ die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h);
+
+ if (raw[0].stride[VPX_PLANE_Y] == raw[0].d_w)
+ read_frame_p = read_frame;
+ else
+ read_frame_p = read_frame_by_row;
+
+ for (i=0; i< NUM_ENCODERS; i++)
+ if(outfile[i])
+ write_ivf_file_header(outfile[i], &cfg[i], 0);
+
+ /* Temporal layers settings */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ set_temporal_layer_pattern(num_temporal_layers[i],
+ &cfg[i],
+ cfg[i].rc_target_bitrate,
+ &layer_flags[i * VPX_TS_MAX_PERIODICITY]);
+ }
+
+ /* Initialize multi-encoder */
+ if(vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS,
+ (show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0]))
+ die_codec(&codec[0], "Failed to initialize encoder");
+
+ /* The extra encoding configuration parameters can be set as follows. */
+ /* Set encoding speed */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ int speed = -6;
+ /* Lower speed for the lowest resolution. */
+ if (i == NUM_ENCODERS - 1) speed = -4;
+ if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
+ die_codec(&codec[i], "Failed to set cpu_used");
+ }
+
+ /* Set static threshold = 1 for all encoders */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1))
+ die_codec(&codec[i], "Failed to set static threshold");
+ }
+
+ /* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */
+ /* Enable denoising for the highest-resolution encoder. */
+ if(vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 1))
+ die_codec(&codec[0], "Failed to set noise_sensitivity");
+ for ( i=1; i< NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0))
+ die_codec(&codec[i], "Failed to set noise_sensitivity");
+ }
+
+ /* Set the number of token partitions */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ if(vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1))
+ die_codec(&codec[i], "Failed to set static threshold");
+ }
+
+ /* Set the max intra target bitrate */
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ unsigned int max_intra_size_pct =
+ (int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10);
+ if(vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ max_intra_size_pct))
+ die_codec(&codec[i], "Failed to set static threshold");
+ //printf("%d %d \n",i,max_intra_size_pct);
+ }
+
+ frame_avail = 1;
+ got_data = 0;
+
+ while(frame_avail || got_data)
+ {
+ vpx_codec_iter_t iter[NUM_ENCODERS]={NULL};
+ const vpx_codec_cx_pkt_t *pkt[NUM_ENCODERS];
+
+ flags = 0;
+ frame_avail = read_frame_p(infile, &raw[0]);
+
+ if(frame_avail)
+ {
+ for ( i=1; i<NUM_ENCODERS; i++)
+ {
+ /*Scale the image down a number of times by downsampling factor*/
+ /* FilterMode 1 or 2 give better psnr than FilterMode 0. */
+ I420Scale(raw[i-1].planes[VPX_PLANE_Y], raw[i-1].stride[VPX_PLANE_Y],
+ raw[i-1].planes[VPX_PLANE_U], raw[i-1].stride[VPX_PLANE_U],
+ raw[i-1].planes[VPX_PLANE_V], raw[i-1].stride[VPX_PLANE_V],
+ raw[i-1].d_w, raw[i-1].d_h,
+ raw[i].planes[VPX_PLANE_Y], raw[i].stride[VPX_PLANE_Y],
+ raw[i].planes[VPX_PLANE_U], raw[i].stride[VPX_PLANE_U],
+ raw[i].planes[VPX_PLANE_V], raw[i].stride[VPX_PLANE_V],
+ raw[i].d_w, raw[i].d_h, 1);
+ /* Write out down-sampled input. */
+ length_frame = cfg[i].g_w * cfg[i].g_h *3/2;
+ if (fwrite(raw[i].planes[0], 1, length_frame,
+ downsampled_input[NUM_ENCODERS - i - 1]) !=
+ length_frame)
+ {
+ return EXIT_FAILURE;
+ }
+ }
+ }
+
+ /* Set the flags (reference and update) for all the encoders.*/
+ for ( i=0; i<NUM_ENCODERS; i++)
+ {
+ layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity];
+ flags = 0;
+ flag_periodicity = periodicity_to_num_layers
+ [num_temporal_layers[i] - 1];
+ flags = layer_flags[i * VPX_TS_MAX_PERIODICITY +
+ frame_cnt % flag_periodicity];
+ // Key frame flag for first frame.
+ if (frame_cnt == 0)
+ {
+ flags |= VPX_EFLAG_FORCE_KF;
+ }
+ if (frame_cnt > 0 && frame_cnt == key_frame_insert)
+ {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+
+ vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags);
+ vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ }
+
+ gettimeofday(&tv1, NULL);
+ /* Encode each frame at multi-levels */
+ /* Note the flags must be set to 0 in the encode call if they are set
+ for each frame with the vpx_codec_control(), as done above. */
+ if(vpx_codec_encode(&codec[0], frame_avail? &raw[0] : NULL,
+ frame_cnt, 1, 0, arg_deadline))
+ {
+ die_codec(&codec[0], "Failed to encode frame");
+ }
+ gettimeofday(&tv2, NULL);
+ timersub(&tv2, &tv1, &difftv);
+ cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec);
+ for (i=NUM_ENCODERS-1; i>=0 ; i--)
+ {
+ got_data = 0;
+ while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) )
+ {
+ got_data = 1;
+ switch(pkt[i]->kind) {
+ case VPX_CODEC_CX_FRAME_PKT:
+ write_ivf_frame_header(outfile[i], pkt[i]);
+ (void) fwrite(pkt[i]->data.frame.buf, 1,
+ pkt[i]->data.frame.sz, outfile[i]);
+ break;
+ case VPX_CODEC_PSNR_PKT:
+ if (show_psnr)
+ {
+ int j;
+
+ psnr_sse_total[i] += pkt[i]->data.psnr.sse[0];
+ psnr_samples_total[i] += pkt[i]->data.psnr.samples[0];
+ for (j = 0; j < 4; j++)
+ {
+ psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j];
+ }
+ psnr_count[i]++;
+ }
+
+ break;
+ default:
+ break;
+ }
+ printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT
+ && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":"");
+ fflush(stdout);
+ }
+ }
+ frame_cnt++;
+ }
+ printf("\n");
+ printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000,
+ 1000000 * (double)frame_cnt / (double)cx_time);
+
+ fclose(infile);
+
+ printf("Processed %ld frames.\n",(long int)frame_cnt-1);
+ for (i=0; i< NUM_ENCODERS; i++)
+ {
+ /* Calculate PSNR and print it out */
+ if ( (show_psnr) && (psnr_count[i]>0) )
+ {
+ int j;
+ double ovpsnr = sse_to_psnr(psnr_samples_total[i], 255.0,
+ psnr_sse_total[i]);
+
+ fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i);
+
+ fprintf(stderr, " %.3lf", ovpsnr);
+ for (j = 0; j < 4; j++)
+ {
+ fprintf(stderr, " %.3lf", psnr_totals[i][j]/psnr_count[i]);
+ }
+ }
+
+ if(vpx_codec_destroy(&codec[i]))
+ die_codec(&codec[i], "Failed to destroy codec");
+
+ vpx_img_free(&raw[i]);
+
+ if(!outfile[i])
+ continue;
+
+ /* Try to rewrite the file header with the actual frame count */
+ if(!fseek(outfile[i], 0, SEEK_SET))
+ write_ivf_file_header(outfile[i], &cfg[i], frame_cnt-1);
+ fclose(outfile[i]);
+ }
+ printf("\n");
+
+ return EXIT_SUCCESS;
}
diff --git a/examples/vpx_temporal_svc_encoder.c b/examples/vpx_temporal_svc_encoder.c
index 8cc7f4a..cbe0157 100644
--- a/examples/vpx_temporal_svc_encoder.c
+++ b/examples/vpx_temporal_svc_encoder.c
@@ -675,6 +675,9 @@
die_codec(&codec, "Failed to set SVC");
}
}
+ if (strncmp(encoder->name, "vp8", 3) == 0) {
+ vpx_codec_control(&codec, VP8E_SET_SCREEN_CONTENT_MODE, 0);
+ }
vpx_codec_control(&codec, VP8E_SET_STATIC_THRESHOLD, 1);
vpx_codec_control(&codec, VP8E_SET_TOKEN_PARTITIONS, 1);
// This controls the maximum target size of the key frame.
@@ -697,6 +700,9 @@
cfg.ts_layer_id[frame_cnt % cfg.ts_periodicity];
if (strncmp(encoder->name, "vp9", 3) == 0) {
vpx_codec_control(&codec, VP9E_SET_SVC_LAYER_ID, &layer_id);
+ } else if (strncmp(encoder->name, "vp8", 3) == 0) {
+ vpx_codec_control(&codec, VP8E_SET_TEMPORAL_LAYER_ID,
+ layer_id.temporal_layer_id);
}
flags = layer_flags[frame_cnt % flag_periodicity];
frame_avail = vpx_img_read(&raw, infile);
diff --git a/test/acm_random.h b/test/acm_random.h
index 496dae3..ff5c93e 100644
--- a/test/acm_random.h
+++ b/test/acm_random.h
@@ -29,14 +29,14 @@
uint16_t Rand16(void) {
const uint32_t value =
random_.Generate(testing::internal::Random::kMaxRange);
- return (value >> 16) & 0xffff;
+ return (value >> 15) & 0xffff;
}
uint8_t Rand8(void) {
const uint32_t value =
random_.Generate(testing::internal::Random::kMaxRange);
// There's a bit more entropy in the upper bits of this implementation.
- return (value >> 24) & 0xff;
+ return (value >> 23) & 0xff;
}
uint8_t Rand8Extremes(void) {
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index 28cda2f..182547b 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -316,7 +316,205 @@
Reset();
}
-VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
-VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ ErrorResilienceTestLargeCodecControls()
+ : EncoderTest(GET_PARAM(0)),
+ encoding_mode_(GET_PARAM(1)) {
+ Reset();
+ }
+ virtual ~ErrorResilienceTestLargeCodecControls() {}
+
+ void Reset() {
+ last_pts_ = 0;
+ tot_frame_number_ = 0;
+ // For testing up to 3 layers.
+ for (int i = 0; i < 3; ++i) {
+ bits_total_[i] = 0;
+ }
+ duration_ = 0.0;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(encoding_mode_);
+ }
+
+ //
+ // Frame flags and layer id for temporal layers.
+ //
+
+ // For two layers, test pattern is:
+ // 1 3
+ // 0 2 .....
+ // For three layers, test pattern is:
+ // 1 3 5 7
+ // 2 6
+ // 0 4 ....
+ // LAST is always update on base/layer 0, GOLDEN is updated on layer 1,
+ // and ALTREF is updated on top layer for 3 layer pattern.
+ int SetFrameFlags(int frame_num, int num_temp_layers) {
+ int frame_flags = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ // Layer 0: predict from L and ARF, update L.
+ frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 1: predict from L, G and ARF, and update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ // Layer 0: predict from L, update L.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 2) % 4 == 0) {
+ // Layer 1: predict from L, G, update G.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_REF_ARF;
+ } else if ((frame_num - 1) % 2 == 0) {
+ // Layer 2: predict from L, G, ARF; update ARF.
+ frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST;
+ }
+ }
+ return frame_flags;
+ }
+
+ int SetLayerId(int frame_num, int num_temp_layers) {
+ int layer_id = 0;
+ if (num_temp_layers == 2) {
+ if (frame_num % 2 == 0) {
+ layer_id = 0;
+ } else {
+ layer_id = 1;
+ }
+ } else if (num_temp_layers == 3) {
+ if (frame_num % 4 == 0) {
+ layer_id = 0;
+ } else if ((frame_num - 2) % 4 == 0) {
+ layer_id = 1;
+ } else if ((frame_num - 1) % 2 == 0) {
+ layer_id = 2;
+ }
+ }
+ return layer_id;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (cfg_.ts_number_layers > 1) {
+ int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers);
+ int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
+ if (video->frame() > 0) {
+ encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id);
+ encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags);
+ }
+ const vpx_rational_t tb = video->timebase();
+ timebase_ = static_cast<double>(tb.num) / tb.den;
+ duration_ = 0;
+ return;
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ // Time since last timestamp = duration.
+ vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_;
+ if (duration > 1) {
+ // Update counter for total number of frames (#frames input to encoder).
+ // Needed for setting the proper layer_id below.
+ tot_frame_number_ += static_cast<int>(duration - 1);
+ }
+ int layer = SetLayerId(tot_frame_number_, cfg_.ts_number_layers);
+ const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
+ // Update the total encoded bits. For temporal layers, update the cumulative
+ // encoded bits per layer.
+ for (int i = layer; i < static_cast<int>(cfg_.ts_number_layers); ++i) {
+ bits_total_[i] += frame_size_in_bits;
+ }
+ // Update the most recent pts.
+ last_pts_ = pkt->data.frame.pts;
+ ++tot_frame_number_;
+ }
+
+ virtual void EndPassHook(void) {
+ duration_ = (last_pts_ + 1) * timebase_;
+ if (cfg_.ts_number_layers > 1) {
+ for (int layer = 0; layer < static_cast<int>(cfg_.ts_number_layers);
+ ++layer) {
+ if (bits_total_[layer]) {
+ // Effective file datarate:
+ effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_;
+ }
+ }
+ }
+ }
+
+ double effective_datarate_[3];
+ private:
+ libvpx_test::TestMode encoding_mode_;
+ vpx_codec_pts_t last_pts_;
+ double timebase_;
+ int64_t bits_total_[3];
+ double duration_;
+ int tot_frame_number_;
+ };
+
+// Check two codec controls used for:
+// (1) for setting temporal layer id, and (2) for setting encoder flags.
+// This test invokes those controls for each frame, and verifies encoder/decoder
+// mismatch and basic rate control response.
+// TODO(marpan): Maybe move this test to datarate_test.cc.
+TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.g_lag_in_frames = 0;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ cfg_.g_error_resilient = 1;
+
+ // 3 Temporal layers. Framerate decimation (4, 2, 1).
+ cfg_.ts_number_layers = 3;
+ cfg_.ts_rate_decimator[0] = 4;
+ cfg_.ts_rate_decimator[1] = 2;
+ cfg_.ts_rate_decimator[2] = 1;
+ cfg_.ts_periodicity = 4;
+ cfg_.ts_layer_id[0] = 0;
+ cfg_.ts_layer_id[1] = 2;
+ cfg_.ts_layer_id[2] = 1;
+ cfg_.ts_layer_id[3] = 2;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 200);
+ for (int i = 200; i <= 800; i += 200) {
+ cfg_.rc_target_bitrate = i;
+ Reset();
+ // 40-20-40 bitrate allocation for 3 temporal layers.
+ cfg_.ts_target_bitrate[0] = 40 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
+ ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
+ << " The datarate for the file is lower than target by too much, "
+ "for layer: " << j;
+ ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
+ << " The datarate for the file is greater than target by too much, "
+ "for layer: " << j;
+ }
+ }
+}
+
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLargeCodecControls,
+ ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES);
} // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 23e1510..110c9c3 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -646,20 +646,33 @@
using std::tr1::make_tuple;
#if CONFIG_VP9_HIGHBITDEPTH
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
make_tuple(&vp9_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
- make_tuple(&vp9_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12),
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
+ make_tuple(&vp9_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
#else
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
INSTANTIATE_TEST_CASE_P(
- C, FwdTrans8x8DCT,
+ DISABLED_C, FwdTrans8x8DCT,
::testing::Values(
make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
@@ -671,23 +684,29 @@
make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
make_tuple(&vp9_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
#else
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
INSTANTIATE_TEST_CASE_P(
- NEON, FwdTrans8x8DCT,
+ DISABLED_NEON, FwdTrans8x8DCT,
::testing::Values(
make_tuple(&vp9_fdct8x8_neon, &vp9_idct8x8_64_add_neon, 0,
VPX_BITS_8)));
@@ -701,21 +720,32 @@
#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+// TODO(jingning): re-enable after these handle the expanded range [0, 65535]
+// returned from Rand16().
INSTANTIATE_TEST_CASE_P(
- SSE2, FwdTrans8x8DCT,
+ DISABLED_SSE2, FwdTrans8x8DCT,
::testing::Values(
make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0,
VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
+ DISABLED_SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+// TODO(jingning): re-enable after these handle the expanded range [0, 65535]
+// returned from Rand16().
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_SSE2, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
@@ -726,14 +756,17 @@
make_tuple(&vp9_highbd_fdct8x8_c,
&idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
make_tuple(&vp9_highbd_fdct8x8_sse2,
- &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
-
+// TODO(jingning): re-enable after these handle the expanded range [0, 65535]
+// returned from Rand16().
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_SSE2, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
@@ -755,8 +788,10 @@
#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
!CONFIG_EMULATE_HARDWARE
+// TODO(jingning): re-enable after this handles the expanded range [0, 65535]
+// returned from Rand16().
INSTANTIATE_TEST_CASE_P(
- SSSE3, FwdTrans8x8DCT,
+ DISABLED_SSSE3, FwdTrans8x8DCT,
::testing::Values(
make_tuple(&vp9_fdct8x8_ssse3, &vp9_idct8x8_64_add_ssse3, 0,
VPX_BITS_8)));
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index 4324cb9..0f335e2 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -21,6 +21,7 @@
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_loopfilter.h"
#include "vpx/vpx_integer.h"
using libvpx_test::ACMRandom;
@@ -159,12 +160,12 @@
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
- uint8_t tmp = rnd.Rand8();
+ uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
@@ -243,14 +244,27 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
+
+ // NOTE: The code in vp9_loopfilter.c:update_sharpness computes mblim as a
+ // function of sharpness_lvl and the loopfilter lvl as:
+ // block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));
+ // ...
+ // vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
+ // SIMD_WIDTH);
+ // This means that the largest value for mblim will occur when sharpness_lvl
+ // is equal to 0, and lvl is equal to its greatest value (MAX_LOOP_FILTER).
+ // In this case block_inside_limit will be equal to MAX_LOOP_FILTER and
+ // therefore mblim will be equal to (2 * (lvl + 2) + block_inside_limit) =
+ // 2 * (MAX_LOOP_FILTER + 2) + MAX_LOOP_FILTER = 3 * MAX_LOOP_FILTER + 4
+
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
- uint8_t tmp = rnd.Rand8();
+ uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
@@ -304,12 +318,12 @@
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
- uint8_t tmp = rnd.Rand8();
+ uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
@@ -319,12 +333,12 @@
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
@@ -406,12 +420,12 @@
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
- uint8_t tmp = rnd.Rand8();
+ uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
@@ -421,12 +435,12 @@
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
};
- tmp = rnd.Rand8();
+ tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
diff --git a/third_party/x86inc/x86inc.asm b/third_party/x86inc/x86inc.asm
index 99453a9..8e75a4b 100644
--- a/third_party/x86inc/x86inc.asm
+++ b/third_party/x86inc/x86inc.asm
@@ -617,9 +617,17 @@
%elifidn __OUTPUT_FORMAT__,elf64
global %1:function hidden
%elifidn __OUTPUT_FORMAT__,macho32
- global %1:private_extern
+ %ifdef __NASM_VER__
+ global %1
+ %else
+ global %1:private_extern
+ %endif
%elifidn __OUTPUT_FORMAT__,macho64
- global %1:private_extern
+ %ifdef __NASM_VER__
+ global %1
+ %else
+ global %1:private_extern
+ %endif
%else
global %1
%endif
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index ea1a6a4..192108a 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -187,8 +187,12 @@
{
FRAME_TYPE frame_type;
int is_frame_dropped;
+ // The frame rate for the lowest resolution.
+ double low_res_framerate;
/* The frame number of each reference frames */
unsigned int low_res_ref_frames[MAX_REF_FRAMES];
+ // The video frame counter value for the key frame, for lowest resolution.
+ unsigned int key_frame_counter_value;
LOWER_RES_MB_INFO *mb_info;
} LOWER_RES_FRAME_INFO;
#endif
diff --git a/vp8/common/onyx.h b/vp8/common/onyx.h
index d48c4fe..f39b675 100644
--- a/vp8/common/onyx.h
+++ b/vp8/common/onyx.h
@@ -122,6 +122,7 @@
int Sharpness;
int cpu_used;
unsigned int rc_max_intra_bitrate_pct;
+ unsigned int screen_content_mode;
/* mode ->
*(0)=Realtime/Live Encoding. This mode is optimized for realtim
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index dbdcab9..248e795 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -125,6 +125,8 @@
int optimize;
int q_index;
+ int is_skin;
+ int denoise_zeromv;
#if CONFIG_TEMPORAL_DENOISING
int increase_denoising;
@@ -161,6 +163,8 @@
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b)(BLOCK *b, BLOCKD *d);
+ unsigned int mbs_zero_last_dot_suppress;
+ int zero_last_dot_suppress;
} MACROBLOCK;
diff --git a/vp8/encoder/denoising.c b/vp8/encoder/denoising.c
index c0eff4e..b9fbf06 100644
--- a/vp8/encoder/denoising.c
+++ b/vp8/encoder/denoising.c
@@ -391,7 +391,7 @@
denoiser->denoise_pars.scale_increase_filter = 1;
denoiser->denoise_pars.denoise_mv_bias = 60;
denoiser->denoise_pars.pickmode_mv_bias = 75;
- denoiser->denoise_pars.qp_thresh = 85;
+ denoiser->denoise_pars.qp_thresh = 80;
denoiser->denoise_pars.consec_zerolast = 15;
denoiser->denoise_pars.spatial_blur = 0;
}
@@ -456,10 +456,10 @@
denoiser->bitrate_threshold = 400000; // (bits/sec).
denoiser->threshold_aggressive_mode = 80;
if (width * height > 1280 * 720) {
- denoiser->bitrate_threshold = 2500000;
- denoiser->threshold_aggressive_mode = 180;
+ denoiser->bitrate_threshold = 3000000;
+ denoiser->threshold_aggressive_mode = 200;
} else if (width * height > 960 * 540) {
- denoiser->bitrate_threshold = 1000000;
+ denoiser->bitrate_threshold = 1200000;
denoiser->threshold_aggressive_mode = 120;
} else if (width * height > 640 * 480) {
denoiser->bitrate_threshold = 600000;
@@ -483,7 +483,6 @@
vpx_free(denoiser->denoise_state);
}
-
void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
MACROBLOCK *x,
unsigned int best_sse,
@@ -554,6 +553,7 @@
* Note that any changes to the mode info only affects the
* denoising.
*/
+ x->denoise_zeromv = 1;
mbmi->ref_frame =
x->best_zeromv_reference_frame;
@@ -603,6 +603,12 @@
motion_threshold = denoiser->denoise_pars.scale_motion_thresh *
NOISE_MOTION_THRESHOLD;
+ // If block is considered to be skin area, lower the motion threshold.
+ // In current version set threshold = 1, so only denoise very low
+ // (i.e., zero) mv on skin.
+ if (x->is_skin)
+ motion_threshold = 1;
+
if (motion_magnitude2 <
denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD)
x->increase_denoising = 1;
@@ -662,6 +668,7 @@
/* No filtering of this block; it differs too much from the predictor,
* or the motion vector magnitude is considered too big.
*/
+ x->denoise_zeromv = 0;
vp8_copy_mem16x16(
x->thismb, 16,
denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
@@ -692,7 +699,7 @@
int uv_stride =denoiser->yv12_running_avg[INTRA_FRAME].uv_stride;
// Fix filter level to some nominal value for now.
- int filter_level = 32;
+ int filter_level = 48;
int hev_index = lfi_n->hev_thr_lut[INTER_FRAME][filter_level];
lfi.mblim = lfi_n->mblim[filter_level];
diff --git a/vp8/encoder/denoising.h b/vp8/encoder/denoising.h
index 6c1f9e2..9a379a6 100644
--- a/vp8/encoder/denoising.h
+++ b/vp8/encoder/denoising.h
@@ -19,7 +19,7 @@
#endif
#define SUM_DIFF_THRESHOLD (16 * 16 * 2)
-#define SUM_DIFF_THRESHOLD_HIGH (600)
+#define SUM_DIFF_THRESHOLD_HIGH (600) // ~(16 * 16 * 1.5)
#define MOTION_MAGNITUDE_THRESHOLD (8*3)
#define SUM_DIFF_THRESHOLD_UV (96) // (8 * 8 * 1.5)
@@ -27,7 +27,7 @@
#define SUM_DIFF_FROM_AVG_THRESH_UV (8 * 8 * 8)
#define MOTION_MAGNITUDE_THRESHOLD_UV (8*3)
-#define MAX_GF_ARF_DENOISE_RANGE (16)
+#define MAX_GF_ARF_DENOISE_RANGE (8)
enum vp8_denoiser_decision
{
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 2a3f69c..70632c0 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -522,7 +522,8 @@
}
#endif
- // Keep track of how many (consecutive) times a block is coded
+
+ // Keep track of how many (consecutive) times a block is coded
// as ZEROMV_LASTREF, for base layer frames.
// Reset to 0 if its coded as anything else.
if (cpi->current_layer == 0) {
@@ -531,9 +532,14 @@
// Increment, check for wrap-around.
if (cpi->consec_zero_last[map_index+mb_col] < 255)
cpi->consec_zero_last[map_index+mb_col] += 1;
+ if (cpi->consec_zero_last_mvbias[map_index+mb_col] < 255)
+ cpi->consec_zero_last_mvbias[map_index+mb_col] += 1;
} else {
cpi->consec_zero_last[map_index+mb_col] = 0;
+ cpi->consec_zero_last_mvbias[map_index+mb_col] = 0;
}
+ if (x->zero_last_dot_suppress)
+ cpi->consec_zero_last_mvbias[map_index+mb_col] = 0;
}
/* Special case code for cyclic refresh
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 3598a7a..a6b30a6 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -215,11 +215,15 @@
LAST_FRAME) {
// Increment, check for wrap-around.
if (cpi->consec_zero_last[map_index+mb_col] < 255)
- cpi->consec_zero_last[map_index+mb_col] +=
- 1;
+ cpi->consec_zero_last[map_index+mb_col] += 1;
+ if (cpi->consec_zero_last_mvbias[map_index+mb_col] < 255)
+ cpi->consec_zero_last_mvbias[map_index+mb_col] += 1;
} else {
cpi->consec_zero_last[map_index+mb_col] = 0;
+ cpi->consec_zero_last_mvbias[map_index+mb_col] = 0;
}
+ if (x->zero_last_dot_suppress)
+ cpi->consec_zero_last_mvbias[map_index+mb_col] = 0;
}
/* Special case code for cyclic refresh
@@ -505,6 +509,7 @@
mb->intra_error = 0;
vp8_zero(mb->count_mb_ref_frame_usage);
mb->mbs_tested_so_far = 0;
+ mb->mbs_zero_last_dot_suppress = 0;
}
}
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 3cceb5a..b046456 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -579,11 +579,31 @@
cpi->cyclic_refresh_q = Q / 2;
+ if (cpi->oxcf.screen_content_mode) {
+ // Modify quality ramp-up based on Q. Above some Q level, increase the
+ // number of blocks to be refreshed, and reduce it below the threshold.
+ // Turn-off under certain conditions (i.e., away from key frame, and if
+ // we are at good quality (low Q) and most of the blocks were skipped-encoded
+ // in previous frame).
+ if (Q >= 100) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
+ } else if (cpi->frames_since_key > 250 &&
+ Q < 20 &&
+ cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
+ cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
+ } else {
+ cpi->cyclic_refresh_mode_max_mbs_perframe =
+ (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
+ }
+ block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
+ }
+
// Set every macroblock to be eligible for update.
// For key frame this will reset seg map to 0.
vpx_memset(cpi->segmentation_map, 0, mbs_in_frame);
- if (cpi->common.frame_type != KEY_FRAME)
+ if (cpi->common.frame_type != KEY_FRAME && block_count > 0)
{
/* Cycle through the macro_block rows */
/* MB loop to set local segmentation map */
@@ -617,15 +637,18 @@
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0) {
if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
- Q < (int)cpi->denoiser.denoise_pars.qp_thresh) {
+ Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
+ (cpi->frames_since_key >
+ 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
// Under aggressive denoising, use segmentation to turn off loop
- // filter below some qp thresh. The filter is turned off for all
+ // filter below some qp thresh. The filter is reduced for all
// blocks that have been encoded as ZEROMV LAST x frames in a row,
// where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
// This is to avoid "dot" artifacts that can occur from repeated
// loop filtering on noisy input source.
cpi->cyclic_refresh_q = Q;
- lf_adjustment = -MAX_LOOP_FILTER;
+ // lf_adjustment = -MAX_LOOP_FILTER;
+ lf_adjustment = -40;
for (i = 0; i < mbs_in_frame; ++i) {
seg_map[i] = (cpi->consec_zero_last[i] >
cpi->denoiser.denoise_pars.consec_zerolast) ? 1 : 0;
@@ -786,6 +809,7 @@
}
cpi->mb.mbs_tested_so_far = 0;
+ cpi->mb.mbs_zero_last_dot_suppress = 0;
/* best quality defaults */
sf->RD = 1;
@@ -853,6 +877,25 @@
sf->thresh_mult[THR_SPLIT2] =
sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2);
+ // Special case for temporal layers.
+ // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
+ // used as second reference. We don't modify thresholds for ALTREF case
+ // since ALTREF is usually used as long-term reference in temporal layers.
+ if ((cpi->Speed <= 6) &&
+ (cpi->oxcf.number_of_layers > 1) &&
+ (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
+ (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
+ if (cpi->closest_reference_frame == GOLDEN_FRAME) {
+ sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
+ sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
+ sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
+ } else {
+ sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
+ sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
+ sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
+ }
+ }
+
cpi->mode_check_freq[THR_ZERO1] =
cpi->mode_check_freq[THR_NEAREST1] =
cpi->mode_check_freq[THR_NEAR1] =
@@ -1380,6 +1423,12 @@
cpi->ref_framerate = cpi->framerate;
+ cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
+
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ cm->refresh_entropy_probs = 1;
+
/* change includes all joint functionality */
vp8_change_config(cpi, oxcf);
@@ -1600,12 +1649,6 @@
cpi->baseline_gf_interval =
cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
- cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
-
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 1;
- cm->refresh_entropy_probs = 1;
-
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
cpi->oxcf.token_partitions = 3;
#endif
@@ -1708,7 +1751,11 @@
if (cpi->oxcf.number_of_layers != prev_number_of_layers)
{
// If the number of temporal layers are changed we must start at the
- // base of the pattern cycle, so reset temporal_pattern_counter.
+ // base of the pattern cycle, so set the layer id to 0 and reset
+ // the temporal pattern counter.
+ if (cpi->temporal_layer_id > 0) {
+ cpi->temporal_layer_id = 0;
+ }
cpi->temporal_pattern_counter = 0;
reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
}
@@ -1855,6 +1902,7 @@
memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
cpi->common.current_video_frame = 0;
cpi->temporal_pattern_counter = 0;
+ cpi->temporal_layer_id = -1;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
cpi->frames_till_gf_update_due = 0;
@@ -1907,6 +1955,8 @@
}
#endif
+ cpi->mse_source_denoised = 0;
+
/* Should we use the cyclic refresh method.
* Currently this is tied to error resilliant mode
*/
@@ -1930,7 +1980,9 @@
cpi->cyclic_refresh_map = (signed char *) NULL;
CHECK_MEM_ERROR(cpi->consec_zero_last,
- vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
+ CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
+ vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
#ifdef VP8_ENTROPY_STATS
init_context_counters();
@@ -2453,6 +2505,7 @@
vpx_free(cpi->tok);
vpx_free(cpi->cyclic_refresh_map);
vpx_free(cpi->consec_zero_last);
+ vpx_free(cpi->consec_zero_last_mvbias);
vp8_remove_common(&cpi->common);
vpx_free(cpi);
@@ -3296,6 +3349,49 @@
}
+static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ VP8_COMP *cpi)
+ {
+ int i, j;
+ int Total = 0;
+ int num_blocks = 0;
+ int skip = 2;
+ int min_consec_zero_last = 10;
+ int tot_num_blocks = (source->y_height * source->y_width) >> 8;
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
+
+ /* Loop through the Y plane, every |skip| blocks along rows and columns,
+ * summing the square differences, and only for blocks that have been
+ * zero_last mode at least |x| frames in a row.
+ */
+ for (i = 0; i < source->y_height; i += 16 * skip)
+ {
+ int block_index_row = (i >> 4) * cpi->common.mb_cols;
+ for (j = 0; j < source->y_width; j += 16 * skip)
+ {
+ int index = block_index_row + (j >> 4);
+ if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
+ unsigned int sse;
+ Total += vp8_mse16x16(src + j,
+ source->y_stride,
+ dst + j, dest->y_stride,
+ &sse);
+ num_blocks++;
+ }
+ }
+ src += 16 * skip * source->y_stride;
+ dst += 16 * skip * dest->y_stride;
+ }
+ // Only return non-zero if we have at least ~1/16 samples for estimate.
+ if (num_blocks > (tot_num_blocks >> 4)) {
+ return (Total / num_blocks);
+ } else {
+ return 0;
+ }
+ }
+
#if CONFIG_TEMPORAL_DENOISING
static void process_denoiser_mode_change(VP8_COMP *cpi) {
const VP8_COMMON *const cm = &cpi->common;
@@ -3350,7 +3446,7 @@
// Only consider this block as valid for noise measurement
// if the sum_diff average of the current and previous frame
// is small (to avoid effects from lighting change).
- if ((sse - var) < 256) {
+ if ((sse - var) < 128) {
unsigned int sse2;
const unsigned int act = vp8_variance16x16(src + j,
ystride,
@@ -3421,6 +3517,13 @@
{
const FRAME_TYPE frame_type = cm->frame_type;
+ int update_any_ref_buffers = 1;
+ if (cpi->common.refresh_last_frame == 0 &&
+ cpi->common.refresh_golden_frame == 0 &&
+ cpi->common.refresh_alt_ref_frame == 0) {
+ update_any_ref_buffers = 0;
+ }
+
if (cm->no_lpf)
{
cm->filter_level = 0;
@@ -3432,11 +3535,36 @@
vp8_clear_system_state();
vpx_usec_timer_start(&timer);
- if (cpi->sf.auto_filter == 0)
+ if (cpi->sf.auto_filter == 0) {
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
+ // Use the denoised buffer for selecting base loop filter level.
+ // Denoised signal for current frame is stored in INTRA_FRAME.
+ // No denoising on key frames.
+ vp8cx_pick_filter_level_fast(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
+ } else {
+ vp8cx_pick_filter_level_fast(cpi->Source, cpi);
+ }
+#else
vp8cx_pick_filter_level_fast(cpi->Source, cpi);
-
- else
+#endif
+ } else {
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
+ // Use the denoised buffer for selecting base loop filter level.
+ // Denoised signal for current frame is stored in INTRA_FRAME.
+ // No denoising on key frames.
+ vp8cx_pick_filter_level(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
+ } else {
+ vp8cx_pick_filter_level(cpi->Source, cpi);
+ }
+#else
vp8cx_pick_filter_level(cpi->Source, cpi);
+#endif
+ }
+
if (cm->filter_level > 0)
{
@@ -3452,7 +3580,9 @@
sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
#endif
- if (cm->filter_level > 0)
+ // No need to apply loop-filter if the encoded frame does not update
+ // any reference buffers.
+ if (cm->filter_level > 0 && update_any_ref_buffers)
{
vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
}
@@ -3582,39 +3712,78 @@
}
#if CONFIG_MULTI_RES_ENCODING
- /* In multi-resolution encoding, frame_type is decided by lowest-resolution
- * encoder. Same frame_type is adopted while encoding at other resolution.
- */
- if (cpi->oxcf.mr_encoder_id)
- {
- LOWER_RES_FRAME_INFO* low_res_frame_info
- = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
+ if (cpi->oxcf.mr_total_resolutions > 1) {
+ LOWER_RES_FRAME_INFO* low_res_frame_info
+ = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
+ if (cpi->oxcf.mr_encoder_id) {
+
+ // TODO(marpan): This constraint shouldn't be needed, as we would like
+ // to allow for key frame setting (forced or periodic) defined per
+ // spatial layer. For now, keep this in.
cm->frame_type = low_res_frame_info->frame_type;
+ // Check if lower resolution is available for motion vector reuse.
if(cm->frame_type != KEY_FRAME)
{
- cpi->mr_low_res_mv_avail = 1;
- cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
+ cpi->mr_low_res_mv_avail = 1;
+ cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
- if (cpi->ref_frame_flags & VP8_LAST_FRAME)
- cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
- == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME)
+ cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
+ == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
- if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
- cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
- == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
+ if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
+ cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
+ == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
- if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
- cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
- == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
+ // Don't use altref to determine whether low res is available.
+ // TODO (marpan): Should we make this type of condition on a
+ // per-reference frame basis?
+ /*
+ if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
+ cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
+ == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
+ */
}
+ }
+
+ // On a key frame: For the lowest resolution, keep track of the key frame
+ // counter value. For the higher resolutions, reset the current video
+ // frame counter to that of the lowest resolution.
+ // This is done to handle the case where we may stop/start encoding
+ // higher layer(s). The restart-encoding of higher layer is only signaled
+ // by a key frame for now.
+ // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
+ if (cm->frame_type == KEY_FRAME) {
+ if (cpi->oxcf.mr_encoder_id) {
+ // If the initial starting value of the buffer level is zero (this can
+ // happen because we may have not started encoding this higher stream),
+ // then reset it to non-zero value based on |starting_buffer_level|.
+ if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
+ unsigned int i;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ for (i = 0; i < cpi->oxcf.number_of_layers; i++) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->bits_off_target = lc->starting_buffer_level;
+ lc->buffer_level = lc->starting_buffer_level;
+ }
+ }
+ cpi->common.current_video_frame =
+ low_res_frame_info->key_frame_counter_value;
+ } else {
+ low_res_frame_info->key_frame_counter_value =
+ cpi->common.current_video_frame;
+ }
+ }
+
}
#endif
// Find the reference frame closest to the current frame.
cpi->closest_reference_frame = LAST_FRAME;
- if (cm->frame_type != KEY_FRAME) {
+ if(cm->frame_type != KEY_FRAME) {
int i;
MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
@@ -3624,12 +3793,12 @@
} else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
closest_ref = ALTREF_FRAME;
}
- for (i = 1; i <= 3; i++) {
+ for(i = 1; i <= 3; i++) {
vpx_ref_frame_type_t ref_frame_type = (vpx_ref_frame_type_t)
((i == 3) ? 4 : i);
if (cpi->ref_frame_flags & ref_frame_type) {
if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
- (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
+ (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
closest_ref = i;
}
}
@@ -3656,6 +3825,8 @@
// Reset the zero_last counter to 0 on key frame.
vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+ vpx_memset(cpi->consec_zero_last_mvbias, 0,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
}
#if 0
@@ -4184,8 +4355,10 @@
else
disable_segmentation(cpi);
}
- // Reset the consec_zero_last counter on key frame.
+ // Reset the zero_last counter to 0 on key frame.
vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
+ vpx_memset(cpi->consec_zero_last_mvbias, 0,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
vp8_set_quantizer(cpi, Q);
}
@@ -4618,6 +4791,22 @@
cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
#if CONFIG_TEMPORAL_DENOISING
+ // Get some measure of the amount of noise, by measuring the (partial) mse
+ // between source and denoised buffer, for y channel. Partial refers to
+ // computing the sse for a sub-sample of the frame (i.e., skip x blocks along row/column),
+ // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
+ // Do this every ~8 frames, to further reduce complexity.
+ // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity < 4,
+ // should be removed in favor of the process_denoiser_mode_change() function below.
+ if (cpi->oxcf.noise_sensitivity > 0 &&
+ cpi->oxcf.noise_sensitivity < 4 &&
+ !cpi->oxcf.screen_content_mode &&
+ cpi->frames_since_key%8 == 0 &&
+ cm->frame_type != KEY_FRAME) {
+ cpi->mse_source_denoised = measure_square_diff_partial(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
+ }
+
// For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
// of source diff (between current and previous frame), and determine if we
// should switch the denoiser mode. Sampling refers to computing the mse for
@@ -4626,6 +4815,7 @@
// constraint on the sum diff between blocks. This process is called every
// ~8 frames, to further reduce complexity.
if (cpi->oxcf.noise_sensitivity == 4 &&
+ !cpi->oxcf.screen_content_mode &&
cpi->frames_since_key % 8 == 0 &&
cm->frame_type != KEY_FRAME) {
process_denoiser_mode_change(cpi);
@@ -4763,6 +4953,13 @@
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+ // If the frame dropper is not enabled, don't let the buffer level go below
+ // some threshold, given here by -|maximum_buffer_size|. For now we only do
+ // this for screen content input.
+ if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
+ cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size)
+ cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
+
/* Rolling monitors of whether we are over or underspending used to
* help regulate min and Max Q in two pass.
*/
@@ -5237,7 +5434,26 @@
cpi->ref_framerate = 10000000.0 / avg_duration;
}
-
+#if CONFIG_MULTI_RES_ENCODING
+ if (cpi->oxcf.mr_total_resolutions > 1) {
+ LOWER_RES_FRAME_INFO* low_res_frame_info = (LOWER_RES_FRAME_INFO*)
+ cpi->oxcf.mr_low_res_mode_info;
+ // Frame rate should be the same for all spatial layers in
+ // multi-res-encoding (simulcast), so we constrain the frame for
+ // higher layers to be that of lowest resolution. This is needed
+ // as the application may decide to skip encoding a high layer and
+ // then start again, in which case a big jump in time-stamps will
+ // be received for that high layer, which will yield an incorrect
+ // frame rate (from time-stamp adjustment in above calculation).
+ if (cpi->oxcf.mr_encoder_id) {
+ cpi->ref_framerate = low_res_frame_info->low_res_framerate;
+ }
+ else {
+ // Keep track of frame rate for lowest resolution.
+ low_res_frame_info->low_res_framerate = cpi->ref_framerate;
+ }
+ }
+#endif
if (cpi->oxcf.number_of_layers > 1)
{
unsigned int i;
@@ -5267,8 +5483,12 @@
update_layer_contexts (cpi);
/* Restore layer specific context & set frame rate */
- layer = cpi->oxcf.layer_id[
- cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
+ if (cpi->temporal_layer_id >= 0) {
+ layer = cpi->temporal_layer_id;
+ } else {
+ layer = cpi->oxcf.layer_id[
+ cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
+ }
restore_layer_context (cpi, layer);
vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
}
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index f0424e6..b1a749c 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -513,10 +513,18 @@
signed char *cyclic_refresh_map;
// Count on how many (consecutive) times a macroblock uses ZER0MV_LAST.
unsigned char *consec_zero_last;
+ // Counter that is reset when a block is checked for a mode-bias against
+ // ZEROMV_LASTREF.
+ unsigned char *consec_zero_last_mvbias;
// Frame counter for the temporal pattern. Counter is rest when the temporal
// layers are changed dynamically (run-time change).
unsigned int temporal_pattern_counter;
+ // Temporal layer id.
+ int temporal_layer_id;
+
+ // Measure of average squared difference between source and denoised signal.
+ int mse_source_denoised;
#if CONFIG_MULTITHREAD
/* multithread data */
@@ -687,6 +695,7 @@
#endif
/* The frame number of each reference frames */
unsigned int current_ref_frames[MAX_REF_FRAMES];
+ // Closest reference frame to current frame.
MV_REFERENCE_FRAME closest_reference_frame;
struct rd_costs_struct
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 9d5556d..9eb6932 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -40,6 +40,134 @@
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
+// Fixed point implementation of a skin color classifier. Skin color
+// is modeled by a Gaussian distribution in the CbCr color space.
+// See ../../test/skin_color_detector_test.cc where the reference
+// skin color classifier is defined.
+
+// Fixed-point skin color model parameters.
+static const int skin_mean[2] = {7463, 9614}; // q6
+static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
+static const int skin_threshold = 1570636; // q18
+
+// Evaluates the Mahalanobis distance measure for the input CbCr values.
+static int evaluate_skin_color_difference(int cb, int cr)
+{
+ const int cb_q6 = cb << 6;
+ const int cr_q6 = cr << 6;
+ const int cb_diff_q12 = (cb_q6 - skin_mean[0]) * (cb_q6 - skin_mean[0]);
+ const int cbcr_diff_q12 = (cb_q6 - skin_mean[0]) * (cr_q6 - skin_mean[1]);
+ const int cr_diff_q12 = (cr_q6 - skin_mean[1]) * (cr_q6 - skin_mean[1]);
+ const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
+ const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
+ const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
+ const int skin_diff = skin_inv_cov[0] * cb_diff_q2 +
+ skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 +
+ skin_inv_cov[3] * cr_diff_q2;
+ return skin_diff;
+}
+
+static int macroblock_corner_grad(unsigned char* signal, int stride,
+ int offsetx, int offsety, int sgnx, int sgny)
+{
+ int y1 = signal[offsetx * stride + offsety];
+ int y2 = signal[offsetx * stride + offsety + sgny];
+ int y3 = signal[(offsetx + sgnx) * stride + offsety];
+ int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
+ return MAX(MAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
+}
+
+static int check_dot_artifact_candidate(VP8_COMP *cpi,
+ MACROBLOCK *x,
+ unsigned char *target_last,
+ int stride,
+ unsigned char* last_ref,
+ int mb_row,
+ int mb_col,
+ int channel)
+{
+ int threshold1 = 6;
+ int threshold2 = 3;
+ unsigned int max_num = (cpi->common.MBs) / 10;
+ int grad_last = 0;
+ int grad_source = 0;
+ int index = mb_row * cpi->common.mb_cols + mb_col;
+ // Threshold for #consecutive (base layer) frames using zero_last mode.
+ int num_frames = 30;
+ int shift = 15;
+ if (channel > 0) {
+ shift = 7;
+ }
+ if (cpi->oxcf.number_of_layers > 1)
+ {
+ num_frames = 20;
+ }
+ x->zero_last_dot_suppress = 0;
+ // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
+ // (i.e, at least |x| consecutive frames are candidates for increasing the
+ // rd adjustment for zero_last mode.
+ // Only allow this for at most |max_num| blocks per frame.
+ // Don't allow this for screen content input.
+ if (cpi->current_layer == 0 &&
+ cpi->consec_zero_last_mvbias[index] > num_frames &&
+ x->mbs_zero_last_dot_suppress < max_num &&
+ !cpi->oxcf.screen_content_mode)
+ {
+ // If this block is checked here, label it so we don't check it again until
+ // ~|x| frames later.
+ x->zero_last_dot_suppress = 1;
+ // Dot artifact is noticeable as strong gradient at corners of macroblock,
+ // for flat areas. As a simple detector for now, we look for a high
+ // corner gradient on last ref, and a smaller gradient on source.
+ // Check 4 corners, return if any satisfy condition.
+ // Top-left:
+ grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
+ grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
+ if (grad_last >= threshold1 && grad_source <= threshold2)
+ {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Top-right:
+ grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
+ grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
+ if (grad_last >= threshold1 && grad_source <= threshold2)
+ {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Bottom-left:
+ grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
+ grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
+ if (grad_last >= threshold1 && grad_source <= threshold2)
+ {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ // Bottom-right:
+ grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
+ grad_source = macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
+ if (grad_last >= threshold1 && grad_source <= threshold2)
+ {
+ x->mbs_zero_last_dot_suppress++;
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+// Checks if the input yCbCr values corresponds to skin color.
+static int is_skin_color(int y, int cb, int cr)
+{
+ if (y < 40 || y > 220)
+ {
+ return 0;
+ }
+ return (evaluate_skin_color_difference(cb, cr) < skin_threshold);
+}
+
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
@@ -514,10 +642,17 @@
#endif
// Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
- if (this_mode == ZEROMV &&
- x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
- (denoise_aggressive || cpi->closest_reference_frame == LAST_FRAME)) {
- this_rd = ((int64_t)this_rd) * rd_adj / 100;
+ // TODO: We should also add condition on distance of closest to current.
+ if(!cpi->oxcf.screen_content_mode &&
+ this_mode == ZEROMV &&
+ x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
+ (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME)))
+ {
+ // No adjustment if block is considered to be skin area.
+ if(x->is_skin)
+ rd_adj = 100;
+
+ this_rd = ((int64_t)this_rd) * rd_adj / 100;
}
check_for_encode_breakout(*sse, x);
@@ -597,6 +732,15 @@
#endif
int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
+
+#if CONFIG_MULTI_RES_ENCODING
+ int dissim = INT_MAX;
+ int parent_ref_frame = 0;
+ int_mv parent_ref_mv;
+ MB_PREDICTION_MODE parent_mode = 0;
+ int parent_ref_valid = 0;
+#endif
+
int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
@@ -607,14 +751,55 @@
unsigned char *plane[4][3];
int ref_frame_map[4];
int sign_bias = 0;
+ int dot_artifact_candidate = 0;
+ // For detecting dot artifact.
+ unsigned char* target = x->src.y_buffer;
+ unsigned char* target_u = x->block[16].src + *x->block[16].base_src;
+ unsigned char* target_v = x->block[20].src + *x->block[20].base_src;
+ int stride = x->src.y_stride;
+ int stride_uv = x->block[16].src_stride;
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
+ target =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
+ stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
+ if (uv_denoise) {
+ target_u =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer + recon_uvoffset;
+ target_v =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer + recon_uvoffset;
+ stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
+ }
+ }
+#endif
+
+ get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
+
+ dot_artifact_candidate =
+ check_dot_artifact_candidate(cpi, x,
+ target, stride,
+ plane[LAST_FRAME][0], mb_row, mb_col, 0);
+ // If not found in Y channel, check UV channel.
+ if (!dot_artifact_candidate) {
+ dot_artifact_candidate =
+ check_dot_artifact_candidate(cpi, x,
+ target_u, stride_uv,
+ plane[LAST_FRAME][1], mb_row, mb_col, 1);
+ if (!dot_artifact_candidate) {
+ dot_artifact_candidate =
+ check_dot_artifact_candidate(cpi, x,
+ target_v, stride_uv,
+ plane[LAST_FRAME][2], mb_row, mb_col, 2);
+ }
+ }
#if CONFIG_MULTI_RES_ENCODING
- int dissim = INT_MAX;
- int parent_ref_frame = 0;
- int parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
- int_mv parent_ref_mv;
- MB_PREDICTION_MODE parent_mode = 0;
-
+ // |parent_ref_valid| will be set here if potentially we can do mv reuse for
+ // this higher resolution (|cpi->oxcf.mr_encoder_id| > 0) frame.
+ // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
+ // the current macroblock below.
+ parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
if (parent_ref_valid)
{
int parent_ref_flag;
@@ -632,17 +817,44 @@
* In this event, take the conservative approach of disabling the
* lower res info for this MB.
*/
+
parent_ref_flag = 0;
+ // Note availability for mv reuse is only based on last and golden.
if (parent_ref_frame == LAST_FRAME)
parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
else if (parent_ref_frame == GOLDEN_FRAME)
parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
- else if (parent_ref_frame == ALTREF_FRAME)
- parent_ref_flag = (cpi->ref_frame_flags & VP8_ALTR_FRAME);
//assert(!parent_ref_frame || parent_ref_flag);
+
+ // If |parent_ref_frame| did not match either last or golden then
+ // shut off mv reuse.
if (parent_ref_frame && !parent_ref_flag)
parent_ref_valid = 0;
+
+ // Don't do mv reuse since we want to allow for another mode besides
+ // ZEROMV_LAST to remove dot artifact.
+ if (dot_artifact_candidate)
+ parent_ref_valid = 0;
+ }
+#endif
+
+ // Check if current macroblock is in skin area.
+ {
+ const int y = x->src.y_buffer[7 * x->src.y_stride + 7];
+ const int cb = x->src.u_buffer[3 * x->src.uv_stride + 3];
+ const int cr = x->src.v_buffer[3 * x->src.uv_stride + 3];
+ x->is_skin = 0;
+ if (!cpi->oxcf.screen_content_mode)
+ x->is_skin = is_skin_color(y, cb, cr);
+ }
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity) {
+ // Under aggressive denoising mode, should we use skin map to reduce denoiser
+ // and ZEROMV bias? Will need to revisit the accuracy of this detection for
+ // very noisy input. For now keep this as is (i.e., don't turn it off).
+ // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
+ // x->is_skin = 0;
}
#endif
@@ -680,8 +892,6 @@
best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
}
- get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
-
/* Count of the number of MBs tested so far this frame */
x->mbs_tested_so_far++;
@@ -691,9 +901,13 @@
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
/* If the frame has big static background and current MB is in low
- * motion area, its mode decision is biased to ZEROMV mode.
- */
- calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
+ * motion area, its mode decision is biased to ZEROMV mode.
+ * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
+ * At such speed settings, ZEROMV is already heavily favored.
+ */
+ if (cpi->Speed < 12) {
+ calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
+ }
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) {
@@ -702,6 +916,13 @@
}
#endif
+ if (dot_artifact_candidate)
+ {
+ // Bias against ZEROMV_LAST mode.
+ rd_adjustment = 150;
+ }
+
+
/* if we encode a new mv this is important
* find the best new motion vector
*/
@@ -887,14 +1108,17 @@
step_param = cpi->sf.first_step + speed_adjust;
#if CONFIG_MULTI_RES_ENCODING
- /* If lower-res drops this frame, then higher-res encoder does
- motion search without any previous knowledge. Also, since
- last frame motion info is not stored, then we can not
+ /* If lower-res frame is not available for mv reuse (because of
+ frame dropping or different temporal layer pattern), then higher
+ resol encoder does motion search without any previous knowledge.
+ Also, since last frame motion info is not stored, then we can not
use improved_mv_pred. */
- if (cpi->oxcf.mr_encoder_id && !parent_ref_valid)
+ if (cpi->oxcf.mr_encoder_id)
sf_improved_mv_pred = 0;
- if (parent_ref_valid && parent_ref_frame)
+ // Only use parent MV as predictor if this candidate reference frame
+ // (|this_ref_frame|) is equal to |parent_ref_frame|.
+ if (parent_ref_valid && (parent_ref_frame == this_ref_frame))
{
/* Use parent MV as predictor. Adjust search range
* accordingly.
@@ -938,7 +1162,8 @@
}
#if CONFIG_MULTI_RES_ENCODING
- if (parent_ref_valid && parent_ref_frame && dissim <= 2 &&
+ if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
+ dissim <= 2 &&
MAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4)
{
@@ -975,10 +1200,12 @@
* change the behavior in lowest-resolution encoder.
* Will improve it later.
*/
- /* Set step_param to 0 to ensure large-range motion search
- when encoder drops this frame at lower-resolution.
- */
- if (!parent_ref_valid)
+ /* Set step_param to 0 to ensure large-range motion search
+ * when mv reuse if not valid (i.e. |parent_ref_valid| = 0),
+ * or if this candidate reference frame (|this_ref_frame|) is
+ * not equal to |parent_ref_frame|.
+ */
+ if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
step_param = 0;
#endif
bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv,
@@ -1080,7 +1307,6 @@
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity)
{
-
/* Store for later use by denoiser. */
// Dont' denoise with GOLDEN OR ALTREF is they are old reference
// frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
@@ -1096,7 +1322,7 @@
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
- /* Store the best NEWMV in x for later use in the denoiser. */
+ // Store the best NEWMV in x for later use in the denoiser.
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
sse < best_sse && !skip_old_reference)
{
@@ -1184,6 +1410,8 @@
if (cpi->oxcf.noise_sensitivity)
{
int block_index = mb_row * cpi->common.mb_cols + mb_col;
+ int reevaluate = 0;
+ int is_noisy = 0;
if (x->best_sse_inter_mode == DC_PRED)
{
/* No best MV found. */
@@ -1193,18 +1421,52 @@
x->best_reference_frame = best_mbmode.ref_frame;
best_sse = best_rd_sse;
}
+ // For non-skin blocks that have selected ZEROMV for this current frame,
+ // and have been selecting ZEROMV_LAST (on the base layer frame) at
+ // least |x~20| consecutive past frames in a row, label the block for
+ // possible increase in denoising strength. We also condition this
+ // labeling on there being significant denoising in the scene
+ if (cpi->oxcf.noise_sensitivity == 4) {
+ if (cpi->denoiser.nmse_source_diff >
+ 70 * cpi->denoiser.threshold_aggressive_mode / 100)
+ is_noisy = 1;
+ } else {
+ if (cpi->mse_source_denoised > 1000)
+ is_noisy = 1;
+ }
x->increase_denoising = 0;
+ if (!x->is_skin &&
+ x->best_sse_inter_mode == ZEROMV &&
+ (x->best_reference_frame == LAST_FRAME ||
+ x->best_reference_frame == cpi->closest_reference_frame) &&
+ cpi->consec_zero_last[block_index] >= 20 &&
+ is_noisy) {
+ x->increase_denoising = 1;
+ }
+ x->denoise_zeromv = 0;
vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
recon_yoffset, recon_uvoffset,
&cpi->common.lf_info, mb_row, mb_col,
block_index);
- /* Reevaluate ZEROMV after denoising. */
- if (best_mbmode.ref_frame == INTRA_FRAME &&
+ // Reevaluate ZEROMV after denoising: for large noise content
+ // (i.e., cpi->mse_source_denoised is above threshold), do this for all
+ // blocks that did not pick ZEROMV as best mode but are using ZEROMV
+ // for denoising. Otherwise, always re-evaluate for blocks that picked
+ // INTRA mode as best mode.
+ // Avoid blocks that have been biased against ZERO_LAST
+ // (i.e., dot artifact candidate blocks).
+ reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
+ (best_mbmode.mode != ZEROMV &&
+ x->denoise_zeromv &&
+ cpi->mse_source_denoised > 2000);
+ if (!dot_artifact_candidate &&
+ reevaluate &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{
int this_rd = 0;
int this_ref_frame = x->best_zeromv_reference_frame;
+ rd_adjustment = 100;
rate2 = x->ref_frame_cost[this_ref_frame] +
vp8_cost_mv_ref(ZEROMV, mdcounts);
distortion2 = 0;
@@ -1264,7 +1526,6 @@
update_mvcount(x, &best_ref_mv);
}
-
void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_)
{
int error4x4, error16x16 = INT_MAX;
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 2feb316..bd47823 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -535,6 +535,7 @@
MACROBLOCKD *mbd = &cpi->mb.e_mbd;
int update = 0;
int new_delta_q;
+ int new_uv_delta_q;
cm->base_qindex = Q;
/* if any of the delta_q values are changing update flag has to be set */
@@ -542,8 +543,6 @@
cm->y1dc_delta_q = 0;
cm->y2ac_delta_q = 0;
- cm->uvdc_delta_q = 0;
- cm->uvac_delta_q = 0;
if (Q < 4)
{
@@ -555,6 +554,21 @@
update |= cm->y2dc_delta_q != new_delta_q;
cm->y2dc_delta_q = new_delta_q;
+ new_uv_delta_q = 0;
+ // For screen content, lower the q value for UV channel. For now, select
+ // conservative delta; same delta for dc and ac, and decrease it with lower
+ // Q, and set to 0 below some threshold. May want to condition this in
+ // future on the variance/energy in UV channel.
+ if (cpi->oxcf.screen_content_mode && Q > 40) {
+ new_uv_delta_q = -(int)(0.15 * Q);
+ // Check range: magnitude of delta is 4 bits.
+ if (new_uv_delta_q < -15) {
+ new_uv_delta_q = -15;
+ }
+ }
+ update |= cm->uvdc_delta_q != new_uv_delta_q;
+ cm->uvdc_delta_q = new_uv_delta_q;
+ cm->uvac_delta_q = new_uv_delta_q;
/* Set Segment specific quatizers */
mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index c51650c..e30ad9e 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -708,7 +708,13 @@
Adjustment = (cpi->this_frame_target - min_frame_target);
if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1))
- cpi->this_frame_target += ((cpi->current_gf_interval - 1) * Adjustment);
+ {
+ Adjustment = (cpi->current_gf_interval - 1) * Adjustment;
+ // Limit adjustment to 10% of current target.
+ if (Adjustment > (10 * cpi->this_frame_target) / 100)
+ Adjustment = (10 * cpi->this_frame_target) / 100;
+ cpi->this_frame_target += Adjustment;
+ }
else
cpi->this_frame_target -= Adjustment;
}
diff --git a/vp8/vp8_cx_iface.c b/vp8/vp8_cx_iface.c
index b1b079c..3426f59 100644
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -37,6 +37,7 @@
vp8e_tuning tuning;
unsigned int cq_level; /* constrained quality level */
unsigned int rc_max_intra_bitrate_pct;
+ unsigned int screen_content_mode;
};
@@ -62,6 +63,7 @@
0, /* tuning*/
10, /* cq_level */
0, /* rc_max_intra_bitrate_pct */
+ 0, /* screen_content_mode */
};
struct vpx_codec_alg_priv
@@ -79,6 +81,7 @@
/* pkt_list size depends on the maximum number of lagged frames allowed. */
vpx_codec_pkt_list_decl(64) pkt_list;
unsigned int fixed_kf_cntr;
+ vpx_enc_frame_flags_t control_frame_flags;
};
@@ -194,6 +197,7 @@
RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6);
RANGE_CHECK(vp8_cfg, arnr_type, 1, 3);
RANGE_CHECK(vp8_cfg, cq_level, 0, 63);
+ RANGE_CHECK_BOOL(vp8_cfg, screen_content_mode);
if (finalize && (cfg->rc_end_usage == VPX_CQ || cfg->rc_end_usage == VPX_Q))
RANGE_CHECK(vp8_cfg, cq_level,
cfg->rc_min_quantizer, cfg->rc_max_quantizer);
@@ -231,7 +235,8 @@
RANGE_CHECK_HI(cfg, ts_periodicity, 16);
for (i=1; i<cfg->ts_number_layers; i++)
- if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i-1])
+ if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i-1] &&
+ cfg->rc_target_bitrate > 0)
ERROR("ts_target_bitrate entries are not strictly increasing");
RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers-1], 1, 1);
@@ -397,6 +402,8 @@
oxcf->tuning = vp8_cfg.tuning;
+ oxcf->screen_content_mode = vp8_cfg.screen_content_mode;
+
/*
printf("Current VP8 Settings: \n");
printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
@@ -586,6 +593,15 @@
return update_extracfg(ctx, &extra_cfg);
}
+static vpx_codec_err_t set_screen_content_mode(vpx_codec_alg_priv_t *ctx,
+ va_list args)
+{
+ struct vp8_extracfg extra_cfg = ctx->vp8_cfg;
+ extra_cfg.screen_content_mode =
+ CAST(VP8E_SET_SCREEN_CONTENT_MODE, args);
+ return update_extracfg(ctx, &extra_cfg);
+}
+
static vpx_codec_err_t vp8e_mr_alloc_mem(const vpx_codec_enc_cfg_t *cfg,
void **mem_loc)
{
@@ -768,27 +784,9 @@
}
}
-
-static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned long duration,
- vpx_enc_frame_flags_t flags,
- unsigned long deadline)
+static vpx_codec_err_t set_reference_and_update(vpx_codec_alg_priv_t *ctx,
+ int flags)
{
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- if (!ctx->cfg.rc_target_bitrate)
- return res;
-
- if (img)
- res = validate_img(ctx, img);
-
- if (!res)
- res = validate_config(ctx, &ctx->cfg, &ctx->vp8_cfg, 1);
-
- pick_quickcompress_mode(ctx, duration, deadline);
- vpx_codec_pkt_list_init(&ctx->pkt_list);
/* Handle Flags */
if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF))
@@ -838,6 +836,42 @@
vp8_update_entropy(ctx->cpi, 0);
}
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline)
+{
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (!ctx->cfg.rc_target_bitrate)
+ return res;
+
+ if (!ctx->cfg.rc_target_bitrate)
+ return res;
+
+ if (img)
+ res = validate_img(ctx, img);
+
+ if (!res)
+ res = validate_config(ctx, &ctx->cfg, &ctx->vp8_cfg, 1);
+
+ pick_quickcompress_mode(ctx, duration, deadline);
+ vpx_codec_pkt_list_init(&ctx->pkt_list);
+
+ // If no flags are set in the encode call, then use the frame flags as
+ // defined via the control function: vp8e_set_frame_flags.
+ if (!flags) {
+ flags = ctx->control_frame_flags;
+ }
+ ctx->control_frame_flags = 0;
+
+ res = set_reference_and_update(ctx, flags);
+
/* Handle fixed keyframe intervals */
if (ctx->cfg.kf_mode == VPX_KF_AUTO
&& ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist)
@@ -1140,6 +1174,25 @@
return VPX_CODEC_OK;
}
+static vpx_codec_err_t vp8e_set_frame_flags(vpx_codec_alg_priv_t *ctx,
+ va_list args)
+{
+ int frame_flags = va_arg(args, int);
+ ctx->control_frame_flags = frame_flags;
+ return set_reference_and_update(ctx, frame_flags);
+}
+
+static vpx_codec_err_t vp8e_set_temporal_layer_id(vpx_codec_alg_priv_t *ctx,
+ va_list args)
+{
+ int layer_id = va_arg(args, int);
+ if (layer_id < 0 || layer_id >= (int)ctx->cfg.ts_number_layers) {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ ctx->cpi->temporal_layer_id = layer_id;
+ return VPX_CODEC_OK;
+}
+
static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
va_list args)
{
@@ -1214,6 +1267,8 @@
{VP8E_UPD_ENTROPY, vp8e_update_entropy},
{VP8E_UPD_REFERENCE, vp8e_update_reference},
{VP8E_USE_REFERENCE, vp8e_use_reference},
+ {VP8E_SET_FRAME_FLAGS, vp8e_set_frame_flags},
+ {VP8E_SET_TEMPORAL_LAYER_ID, vp8e_set_temporal_layer_id},
{VP8E_SET_ROI_MAP, vp8e_set_roi_map},
{VP8E_SET_ACTIVEMAP, vp8e_set_activemap},
{VP8E_SET_SCALEMODE, vp8e_set_scalemode},
@@ -1231,6 +1286,7 @@
{VP8E_SET_TUNING, set_tuning},
{VP8E_SET_CQ_LEVEL, set_cq_level},
{VP8E_SET_MAX_INTRA_BITRATE_PCT, set_rc_max_intra_bitrate_pct},
+ {VP8E_SET_SCREEN_CONTENT_MODE, set_screen_content_mode},
{ -1, NULL},
};
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 893a2bb..7d7209c 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -192,6 +192,10 @@
int mi_stride;
MODE_INFO *mi;
+ MODE_INFO *left_mi;
+ MODE_INFO *above_mi;
+ MB_MODE_INFO *left_mbmi;
+ MB_MODE_INFO *above_mbmi;
int up_available;
int left_available;
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index ae69c0c..ed56bed 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -309,17 +309,21 @@
// Are edges available for intra prediction?
xd->up_available = (mi_row != 0);
xd->left_available = (mi_col > tile->mi_col_start);
-}
+ if (xd->up_available) {
+ xd->above_mi = xd->mi[-xd->mi_stride].src_mi;
+ xd->above_mbmi = &xd->above_mi->mbmi;
+ } else {
+ xd->above_mi = NULL;
+ xd->above_mbmi = NULL;
+ }
-static INLINE void set_prev_mi(VP9_COMMON *cm) {
- const int use_prev_in_find_mv_refs = cm->width == cm->last_width &&
- cm->height == cm->last_height &&
- !cm->intra_only &&
- cm->last_show_frame;
- // Special case: set prev_mi to NULL when the previous mode info
- // context cannot be used.
- cm->prev_mi = use_prev_in_find_mv_refs ?
- cm->prev_mip + cm->mi_stride + 1 : NULL;
+ if (xd->left_available) {
+ xd->left_mi = xd->mi[-1].src_mi;
+ xd->left_mbmi = &xd->left_mi->mbmi;
+ } else {
+ xd->left_mi = NULL;
+ xd->left_mbmi = NULL;
+ }
}
static INLINE void update_partition_context(MACROBLOCKD *xd,
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index 901a043..fd735f4 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -15,21 +15,17 @@
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
-static INLINE const MB_MODE_INFO *get_mbmi(const MODE_INFO *const mi) {
- return (mi != NULL) ? &mi->mbmi : NULL;
-}
-
// Returns a context number for the given MB prediction signal
int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int left_type = left_mbmi != NULL && is_inter_block(left_mbmi) ?
- left_mbmi->interp_filter : SWITCHABLE_FILTERS;
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const int above_type = above_mbmi != NULL && is_inter_block(above_mbmi) ?
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int left_type = xd->left_available && is_inter_block(left_mbmi) ?
+ left_mbmi->interp_filter : SWITCHABLE_FILTERS;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const int above_type = xd->up_available && is_inter_block(above_mbmi) ?
above_mbmi->interp_filter : SWITCHABLE_FILTERS;
if (left_type == above_type)
@@ -50,10 +46,10 @@
// 2 - intra/--, --/intra
// 3 - intra/intra
int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int has_above = above_mbmi != NULL;
- const int has_left = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int has_above = xd->up_available;
+ const int has_left = xd->left_available;
if (has_above && has_left) { // both edges available
const int above_intra = !is_inter_block(above_mbmi);
@@ -70,10 +66,10 @@
int vp9_get_reference_mode_context(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int has_above = above_mbmi != NULL;
- const int has_left = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int has_above = xd->up_available;
+ const int has_left = xd->left_available;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -113,10 +109,10 @@
int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int above_in_image = above_mbmi != NULL;
- const int left_in_image = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int above_in_image = xd->up_available;
+ const int left_in_image = xd->left_available;
// Note:
// The mode info data structure has a one element border above and to the
@@ -194,10 +190,10 @@
int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int has_above = above_mbmi != NULL;
- const int has_left = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int has_above = xd->up_available;
+ const int has_left = xd->left_available;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -260,10 +256,10 @@
int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int has_above = above_mbmi != NULL;
- const int has_left = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int has_above = xd->up_available;
+ const int has_left = xd->left_available;
// Note:
// The mode info data structure has a one element border above and to the
@@ -349,10 +345,10 @@
// The prediction flags in these dummy entries are initialized to 0.
int vp9_get_tx_size_context(const MACROBLOCKD *xd) {
const int max_tx_size = max_txsize_lookup[xd->mi[0].src_mi->mbmi.sb_type];
- const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
- const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
- const int has_above = above_mbmi != NULL;
- const int has_left = left_mbmi != NULL;
+ const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
+ const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int has_above = xd->up_available;
+ const int has_left = xd->left_available;
int above_ctx = (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size
: max_tx_size;
int left_ctx = (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index cf13e4a..bc19d28 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -18,20 +18,12 @@
extern "C" {
#endif
-static INLINE const MODE_INFO *get_above_mi(const MACROBLOCKD *const xd) {
- return xd->up_available ? xd->mi[-xd->mi_stride].src_mi : NULL;
-}
-
-static INLINE const MODE_INFO *get_left_mi(const MACROBLOCKD *const xd) {
- return xd->left_available ? xd->mi[-1].src_mi : NULL;
-}
-
int vp9_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids,
BLOCK_SIZE bsize, int mi_row, int mi_col);
static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) {
- const MODE_INFO *const above_mi = get_above_mi(xd);
- const MODE_INFO *const left_mi = get_left_mi(xd);
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int above_sip = (above_mi != NULL) ?
above_mi->mbmi.seg_id_predicted : 0;
const int left_sip = (left_mi != NULL) ? left_mi->mbmi.seg_id_predicted : 0;
@@ -45,8 +37,8 @@
}
static INLINE int vp9_get_skip_context(const MACROBLOCKD *xd) {
- const MODE_INFO *const above_mi = get_above_mi(xd);
- const MODE_INFO *const left_mi = get_left_mi(xd);
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
const int left_skip = (left_mi != NULL) ? left_mi->mbmi.skip : 0;
return above_skip + left_skip;
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 281dcbd..1872191 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -1158,9 +1158,6 @@
specialize qw/vp9_denoiser_filter sse2/;
}
- add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp9_fdct8x8_quant sse2 ssse3/;
-
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
# the transform coefficients are held in 32-bit
# values, so the assembler code for vp9_block_error can no longer be used.
@@ -1178,6 +1175,9 @@
add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b_32x32/;
+
+ add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/vp9_fdct8x8_quant/;
} else {
add_proto qw/int64_t vp9_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
specialize qw/vp9_block_error avx2/, "$sse2_x86inc";
@@ -1193,6 +1193,9 @@
add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b_32x32/, "$ssse3_x86_64";
+
+ add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/vp9_fdct8x8_quant sse2 ssse3/;
}
#
diff --git a/vp9/common/x86/vp9_subpixel_8t_ssse3.asm b/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
index fd781d4..4a5bf1b 100644
--- a/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
+++ b/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
@@ -18,7 +18,7 @@
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
@@ -661,7 +661,7 @@
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
@@ -765,40 +765,50 @@
movq xmm0, [rsi - 3] ;load src data
movq xmm4, [rsi + 5]
- movq xmm7, [rsi + 13]
+ movq xmm6, [rsi + 13]
punpcklqdq xmm0, xmm4
- punpcklqdq xmm4, xmm7
+ punpcklqdq xmm4, xmm6
+ movdqa xmm7, xmm0
+
+ punpcklbw xmm7, xmm7
+ punpckhbw xmm0, xmm0
movdqa xmm1, xmm0
movdqa xmm2, xmm0
movdqa xmm3, xmm0
+
+ palignr xmm0, xmm7, 1
+ palignr xmm1, xmm7, 5
+ pmaddubsw xmm0, k0k1
+ palignr xmm2, xmm7, 9
+ pmaddubsw xmm1, k2k3
+ palignr xmm3, xmm7, 13
+
+ pmaddubsw xmm2, k4k5
+ pmaddubsw xmm3, k6k7
+ paddsw xmm0, xmm3
+
+ movdqa xmm3, xmm4
+ punpcklbw xmm3, xmm3
+ punpckhbw xmm4, xmm4
+
movdqa xmm5, xmm4
movdqa xmm6, xmm4
movdqa xmm7, xmm4
- pshufb xmm0, [GLOBAL(shuf_t0t1)]
- pshufb xmm1, [GLOBAL(shuf_t2t3)]
- pshufb xmm2, [GLOBAL(shuf_t4t5)]
- pshufb xmm3, [GLOBAL(shuf_t6t7)]
- pshufb xmm4, [GLOBAL(shuf_t0t1)]
- pshufb xmm5, [GLOBAL(shuf_t2t3)]
- pshufb xmm6, [GLOBAL(shuf_t4t5)]
- pshufb xmm7, [GLOBAL(shuf_t6t7)]
+ palignr xmm4, xmm3, 1
+ palignr xmm5, xmm3, 5
+ palignr xmm6, xmm3, 9
+ palignr xmm7, xmm3, 13
- pmaddubsw xmm0, k0k1
- pmaddubsw xmm1, k2k3
- pmaddubsw xmm2, k4k5
- pmaddubsw xmm3, k6k7
- pmaddubsw xmm4, k0k1
- pmaddubsw xmm5, k2k3
- pmaddubsw xmm6, k4k5
- pmaddubsw xmm7, k6k7
-
- paddsw xmm0, xmm3
movdqa xmm3, xmm1
+ pmaddubsw xmm4, k0k1
pmaxsw xmm1, xmm2
+ pmaddubsw xmm5, k2k3
pminsw xmm2, xmm3
+ pmaddubsw xmm6, k4k5
paddsw xmm0, xmm2
+ pmaddubsw xmm7, k6k7
paddsw xmm0, xmm1
paddsw xmm4, xmm7
diff --git a/vp9/decoder/vp9_decoder.c b/vp9/decoder/vp9_decoder.c
index 16f3cd4..39f03aa 100644
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -273,8 +273,19 @@
cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
if (setjmp(cm->error.jmp)) {
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ int i;
+
pbi->need_resync = 1;
cm->error.setjmp = 0;
+
+ // Synchronize all threads immediately as a subsequent decode call may
+ // cause a resize invalidating some allocations.
+ winterface->sync(&pbi->lf_worker);
+ for (i = 0; i < pbi->num_tile_workers; ++i) {
+ winterface->sync(&pbi->tile_workers[i]);
+ }
+
vp9_clear_system_state();
// We do not know if the missing frame(s) was supposed to update
diff --git a/vp9/decoder/vp9_reader.h b/vp9/decoder/vp9_reader.h
index 2d9eccf..a68a1d5 100644
--- a/vp9/decoder/vp9_reader.h
+++ b/vp9/decoder/vp9_reader.h
@@ -30,14 +30,15 @@
#define BD_VALUE_SIZE ((int)sizeof(BD_VALUE) * CHAR_BIT)
typedef struct {
+ // Be careful when reordering this struct, it may impact the cache negatively.
+ BD_VALUE value;
+ unsigned int range;
+ int count;
const uint8_t *buffer_end;
const uint8_t *buffer;
- uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
- BD_VALUE value;
- int count;
- unsigned int range;
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
+ uint8_t clear_buffer[sizeof(BD_VALUE) + 1];
} vp9_reader;
int vp9_reader_init(vp9_reader *r,
diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c
index 83f4a53..9ec4799 100644
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -23,7 +23,7 @@
{{1.0, 1.0, 1.0}, {1.0, 2.0, 1.0}, {1.0, 1.5, 2.5}};
static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
{{1.0, 1.0, 1.0}, {1.0, 0.25, 0.0}, {1.0, 0.5, 0.25}};
-static const double aq_c_var_thresholds[AQ_C_SEGMENTS] = {100.0, 12.0, 10.0};
+static const double aq_c_var_thresholds[AQ_C_SEGMENTS] = {100.0, -1.0, -2.0};
static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
// Approximate base quatizer (truncated to int)
@@ -90,6 +90,8 @@
}
}
+#define DEFAULT_LV_THRESH 10.0
+
// Select a segment for the current SB64 block.
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average.
@@ -122,6 +124,11 @@
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
const int active_segments = aq_c_active_segments[aq_strength];
double logvar;
+ double low_var_thresh;
+
+ vp9_clear_system_state();
+ low_var_thresh =
+ (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_LV_THRESH;
vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
logvar = vp9_log_block_var(cpi, mb, bs);
@@ -136,7 +143,7 @@
while (segment > 0) {
if ((projected_rate <
target_rate * aq_c_transitions[aq_strength][segment]) &&
- (logvar < aq_c_var_thresholds[segment])) {
+ (logvar < (low_var_thresh + aq_c_var_thresholds[segment]))) {
break;
}
--segment;
diff --git a/vp9/encoder/vp9_aq_cyclicrefresh.c b/vp9/encoder/vp9_aq_cyclicrefresh.c
index bc68b37..5f1c8ce 100644
--- a/vp9/encoder/vp9_aq_cyclicrefresh.c
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.c
@@ -319,6 +319,20 @@
cr->sb_index = i;
}
+// Set/update global/frame level cyclic refresh parameters.
+void vp9_cyclic_refresh_update_parameters(VP9_COMP *const cpi) {
+ const RATE_CONTROL *const rc = &cpi->rc;
+ CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+ cr->percent_refresh = 10;
+ // Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4)
+ // periods of the refresh cycle, after a key frame. This corresponds to ~40
+ // frames with cr->percent_refresh = 10.
+ if (rc->frames_since_key < 40)
+ cr->rate_ratio_qdelta = 3.0;
+ else
+ cr->rate_ratio_qdelta = 2.0;
+}
+
// Setup cyclic background refresh: set delta q and segmentation map.
void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) {
VP9_COMMON *const cm = &cpi->common;
@@ -343,9 +357,6 @@
int qindex2;
const double q = vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
vp9_clear_system_state();
- // Some of these parameters may be set via codec-control function later.
- cr->percent_refresh = 10;
- cr->rate_ratio_qdelta = 2.0;
cr->max_qdelta_perc = 50;
cr->min_block_size = BLOCK_8X8;
cr->time_for_refresh = 0;
diff --git a/vp9/encoder/vp9_aq_cyclicrefresh.h b/vp9/encoder/vp9_aq_cyclicrefresh.h
index 3fc6776..656d760 100644
--- a/vp9/encoder/vp9_aq_cyclicrefresh.h
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.h
@@ -53,6 +53,9 @@
// Update the actual number of blocks that were applied the segment delta q.
void vp9_cyclic_refresh_update_actual_count(struct VP9_COMP *const cpi);
+// Set/update global/frame level refresh parameters.
+void vp9_cyclic_refresh_update_parameters(struct VP9_COMP *const cpi);
+
// Setup cyclic background refresh: set delta q and segmentation map.
void vp9_cyclic_refresh_setup(struct VP9_COMP *const cpi);
diff --git a/vp9/encoder/vp9_aq_variance.c b/vp9/encoder/vp9_aq_variance.c
index 144936d..be6f7e4 100644
--- a/vp9/encoder/vp9_aq_variance.c
+++ b/vp9/encoder/vp9_aq_variance.c
@@ -19,15 +19,15 @@
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/common/vp9_systemdependent.h"
-#define ENERGY_MIN (-1)
+#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
#define ENERGY_IN_BOUNDS(energy)\
assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
static const double rate_ratio[MAX_SEGMENTS] =
- {1.143, 1.0, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0};
-static const int segment_id[ENERGY_SPAN] = {0, 1, 2};
+ {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0};
+static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4};
#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN]
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 4d88fb5..20368f0 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -346,9 +346,8 @@
MODE_INFO *mi_8x8, vp9_writer *w) {
const struct segmentation *const seg = &cm->seg;
const MODE_INFO *const mi = mi_8x8;
- const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride].src_mi;
- const MODE_INFO *const left_mi =
- xd->left_available ? mi_8x8[-1].src_mi : NULL;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 7788e50..5e6e77d 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -36,6 +36,7 @@
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
@@ -498,7 +499,6 @@
const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
vp9_clear_system_state();
- vp9_zero(vt);
set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
if (xd->mb_to_right_edge < 0)
@@ -1164,7 +1164,7 @@
}
break;
default:
- assert("Invalid partition type.");
+ assert(0 && "Invalid partition type.");
break;
}
@@ -1491,7 +1491,7 @@
output_enabled, subsize, pc_tree->split[3]);
break;
default:
- assert("Invalid partition type.");
+ assert(0 && "Invalid partition type.");
break;
}
@@ -2662,6 +2662,15 @@
return cpi->common.tx_mode;
}
+static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx) {
+ if (bsize < BLOCK_16X16)
+ vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
+ else
+ vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
+}
+
static void nonrd_pick_sb_modes(VP9_COMP *cpi,
TileDataEnc *tile_data, MACROBLOCK *const x,
int mi_row, int mi_col, RD_COST *rd_cost,
@@ -2679,7 +2688,7 @@
x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
if (cm->frame_type == KEY_FRAME)
- vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
+ hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
else
@@ -3147,7 +3156,7 @@
}
break;
default:
- assert("Invalid partition type.");
+ assert(0 && "Invalid partition type.");
break;
}
}
@@ -3282,7 +3291,7 @@
}
break;
default:
- assert("Invalid partition type.");
+ assert(0 && "Invalid partition type.");
break;
}
@@ -3460,9 +3469,9 @@
if (cpi->source_diff_var)
vpx_free(cpi->source_diff_var);
- CHECK_MEM_ERROR(cm, cpi->source_diff_var,
- vpx_calloc(cm->MBs, sizeof(diff)));
- }
+ CHECK_MEM_ERROR(cm, cpi->source_diff_var,
+ vpx_calloc(cm->MBs, sizeof(diff)));
+ }
if (!cpi->frames_till_next_var_check)
cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);
@@ -3488,7 +3497,7 @@
cm->show_frame;
}
-static void init_tile_data(VP9_COMP *cpi) {
+void vp9_init_tile_data(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
@@ -3526,36 +3535,40 @@
}
}
+void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
+ int tile_row, int tile_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ TileDataEnc *this_tile =
+ &cpi->tile_data[tile_row * tile_cols + tile_col];
+ const TileInfo * const tile_info = &this_tile->tile_info;
+ TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
+ int mi_row;
+
+ for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
+ mi_row += MI_BLOCK_SIZE) {
+ if (cpi->sf.use_nonrd_pick_mode)
+ encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
+ else
+ encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
+ }
+ cpi->tok_count[tile_row][tile_col] =
+ (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
+ assert(tok - cpi->tile_tok[tile_row][tile_col] <=
+ allocated_tokens(*tile_info));
+}
+
static void encode_tiles(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
int tile_col, tile_row;
- init_tile_data(cpi);
+ vp9_init_tile_data(cpi);
- for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
- for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
- const TileInfo * const tile_info =
- &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
- TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
- int mi_row;
- TileDataEnc *this_tile =
- &cpi->tile_data[tile_row * tile_cols + tile_col];
-
- for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
- mi_row += MI_BLOCK_SIZE) {
- if (cpi->sf.use_nonrd_pick_mode)
- encode_nonrd_sb_row(cpi, &cpi->td, this_tile, mi_row, &tok);
- else
- encode_rd_sb_row(cpi, &cpi->td, this_tile, mi_row, &tok);
- }
- cpi->tok_count[tile_row][tile_col] =
- (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
- assert(tok - cpi->tile_tok[tile_row][tile_col] <=
- allocated_tokens(*tile_info));
- }
- }
+ for (tile_row = 0; tile_row < tile_rows; ++tile_row)
+ for (tile_col = 0; tile_col < tile_cols; ++tile_col)
+ vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
#if CONFIG_FP_MB_STATS
@@ -3604,6 +3617,7 @@
cm->tx_mode = ALLOW_16X16;
}
+
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth)
x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
@@ -3627,12 +3641,15 @@
vp9_initialize_rd_consts(cpi);
vp9_initialize_me_consts(cpi, cm->base_qindex);
init_encode_frame_mb_context(cpi);
- set_prev_mi(cm);
cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
cm->width == cm->last_width &&
cm->height == cm->last_height &&
!cm->intra_only &&
cm->last_show_frame;
+ // Special case: set prev_mi to NULL when the previous mode info
+ // context cannot be used.
+ cm->prev_mi = cm->use_prev_frame_mvs ?
+ cm->prev_mip + cm->mi_stride + 1 : NULL;
x->quant_fp = cpi->sf.use_quant_fp;
vp9_zero(x->skip_txfm);
@@ -3667,7 +3684,11 @@
}
#endif
- encode_tiles(cpi);
+ // If allowed, encoding tiles in parallel with one thread handling one tile.
+ if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
+ vp9_encode_tiles_mt(cpi);
+ else
+ encode_tiles(cpi);
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
diff --git a/vp9/encoder/vp9_encodeframe.h b/vp9/encoder/vp9_encodeframe.h
index fd1c9aa..556f3a5 100644
--- a/vp9/encoder/vp9_encodeframe.h
+++ b/vp9/encoder/vp9_encodeframe.h
@@ -19,6 +19,7 @@
struct macroblock;
struct yv12_buffer_config;
struct VP9_COMP;
+struct ThreadData;
// Constants used in SOURCE_VAR_BASED_PARTITION
#define VAR_HIST_MAX_BG_VAR 1000
@@ -33,6 +34,10 @@
void vp9_encode_frame(struct VP9_COMP *cpi);
+void vp9_init_tile_data(struct VP9_COMP *cpi);
+void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td,
+ int tile_row, int tile_col);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index ef5bb5a..9b2165b 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -652,10 +652,6 @@
return;
}
-#if CONFIG_VP9_HIGHBITDEPTH
- if (!x->skip_recode)
- vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
-#else
if (!x->skip_recode) {
if (x->quant_fp) {
// Encoding process for rtc mode
@@ -687,7 +683,6 @@
}
}
}
-#endif
if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
const int ctx = combine_entropy_contexts(*a, *l);
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index e82d5d8..a03131c 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -35,6 +35,7 @@
#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mbgraph.h"
#include "vp9/encoder/vp9_encoder.h"
@@ -1728,6 +1729,7 @@
void vp9_remove_compressor(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
unsigned int i;
+ int t;
if (!cpi)
return;
@@ -1800,6 +1802,24 @@
}
#endif
+ for (t = 0; t < cpi->num_workers; ++t) {
+ VP9Worker *const worker = &cpi->workers[t];
+ EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+
+ // Deallocate allocated threads.
+ vp9_get_worker_interface()->end(worker);
+
+ // Deallocate allocated thread data.
+ if (t < cpi->num_workers - 1) {
+ vpx_free(thread_data->td->counts);
+ vp9_free_pc_tree(thread_data->td);
+ vpx_free(thread_data->td);
+ }
+
+ vpx_free(worker->data1);
+ }
+ vpx_free(cpi->workers);
+
dealloc_compressor_data(cpi);
for (i = 0; i < sizeof(cpi->mbgraph_stats) /
@@ -2476,6 +2496,7 @@
if (cm->frame_bufs[new_fb].mvs == NULL ||
cm->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
cm->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
+ vpx_free(cm->frame_bufs[new_fb].mvs);
cm->frame_bufs[new_fb].mvs =
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
sizeof(*cm->frame_bufs[new_fb].mvs));
@@ -3158,6 +3179,7 @@
cpi->rc.source_alt_ref_active = 0;
cm->error_resilient_mode = oxcf->error_resilient_mode;
+ cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode;
// By default, encoder assumes decoder can use prev_mi.
if (cm->error_resilient_mode) {
@@ -3165,7 +3187,6 @@
cm->reset_frame_context = 0;
cm->refresh_frame_context = 0;
} else if (cm->intra_only) {
- cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode;
// Only reset the current context.
cm->reset_frame_context = 2;
}
@@ -3217,11 +3238,8 @@
vp9_clear_system_state();
#if CONFIG_INTERNAL_STATS
- {
- int i;
- for (i = 0; i < MAX_MODES; ++i)
- cpi->mode_chosen_counts[i] = 0;
- }
+ vpx_memset(cpi->mode_chosen_counts, 0,
+ MAX_MODES * sizeof(*cpi->mode_chosen_counts));
#endif
if (cpi->sf.recode_loop == DISALLOW_RECODE) {
@@ -3828,7 +3846,8 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vp9_highbd_calc_ssim(orig, recon, &weight, xd->bd);
+ frame_ssim2 = vp9_highbd_calc_ssim(orig, recon, &weight,
+ (int)cm->bit_depth);
} else {
frame_ssim2 = vp9_calc_ssim(orig, recon, &weight);
}
@@ -3842,7 +3861,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
frame_ssim2 = vp9_highbd_calc_ssim(
- orig, &cm->post_proc_buffer, &weight, xd->bd);
+ orig, &cm->post_proc_buffer, &weight, (int)cm->bit_depth);
} else {
frame_ssim2 = vp9_calc_ssim(orig, &cm->post_proc_buffer, &weight);
}
@@ -3870,7 +3889,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
frame_all = vp9_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
- &u, &v, xd->bd);
+ &u, &v, (int)cm->bit_depth);
} else {
frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u,
&v);
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index b75f491..7342f74 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -20,6 +20,7 @@
#include "vp9/common/vp9_ppflags.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_thread.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_context_tree.h"
@@ -216,6 +217,8 @@
int tile_columns;
int tile_rows;
+ int max_threads;
+
vpx_fixed_buf_t two_pass_stats_in;
struct vpx_codec_pkt_list *output_pkt_list;
@@ -442,6 +445,10 @@
#if CONFIG_VP9_TEMPORAL_DENOISING
VP9_DENOISER denoiser;
#endif
+
+ // Multi-threading
+ int num_workers;
+ VP9Worker *workers;
} VP9_COMP;
void vp9_initialize_enc();
diff --git a/vp9/encoder/vp9_ethread.c b/vp9/encoder/vp9_ethread.c
new file mode 100644
index 0000000..daf3da4
--- /dev/null
+++ b/vp9/encoder/vp9_ethread.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/encoder/vp9_encodeframe.h"
+#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_ethread.h"
+
+static void accumulate_frame_counts(VP9_COMMON *cm, ThreadData *td) {
+ int i, j, k, l, m;
+
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
+ for (j = 0; j < INTRA_MODES; j++)
+ cm->counts.y_mode[i][j] += td->counts->y_mode[i][j];
+
+ for (i = 0; i < INTRA_MODES; i++)
+ for (j = 0; j < INTRA_MODES; j++)
+ cm->counts.uv_mode[i][j] += td->counts->uv_mode[i][j];
+
+ for (i = 0; i < PARTITION_CONTEXTS; i++)
+ for (j = 0; j < PARTITION_TYPES; j++)
+ cm->counts.partition[i][j] += td->counts->partition[i][j];
+
+ for (i = 0; i < TX_SIZES; i++)
+ for (j = 0; j < PLANE_TYPES; j++)
+ for (k = 0; k < REF_TYPES; k++)
+ for (l = 0; l < COEF_BANDS; l++)
+ for (m = 0; m < COEFF_CONTEXTS; m++)
+ cm->counts.eob_branch[i][j][k][l][m] +=
+ td->counts->eob_branch[i][j][k][l][m];
+ // cm->counts.coef is only updated at frame level, so not need
+ // to accumulate it here.
+ // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
+ // cm->counts.coef[i][j][k][l][m][n] +=
+ // td->counts->coef[i][j][k][l][m][n];
+
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
+ for (j = 0; j < SWITCHABLE_FILTERS; j++)
+ cm->counts.switchable_interp[i][j] += td->counts->switchable_interp[i][j];
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++)
+ for (j = 0; j < INTER_MODES; j++)
+ cm->counts.inter_mode[i][j] += td->counts->inter_mode[i][j];
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ cm->counts.intra_inter[i][j] += td->counts->intra_inter[i][j];
+
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ cm->counts.comp_inter[i][j] += td->counts->comp_inter[i][j];
+
+ for (i = 0; i < REF_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ cm->counts.single_ref[i][j][k] += td->counts->single_ref[i][j][k];
+
+ for (i = 0; i < REF_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ cm->counts.comp_ref[i][j] += td->counts->comp_ref[i][j];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZES; j++)
+ cm->counts.tx.p32x32[i][j] += td->counts->tx.p32x32[i][j];
+
+ for (j = 0; j < TX_SIZES - 1; j++)
+ cm->counts.tx.p16x16[i][j] += td->counts->tx.p16x16[i][j];
+
+ for (j = 0; j < TX_SIZES - 2; j++)
+ cm->counts.tx.p8x8[i][j] += td->counts->tx.p8x8[i][j];
+ }
+
+ for (i = 0; i < SKIP_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ cm->counts.skip[i][j] += td->counts->skip[i][j];
+
+ for (i = 0; i < MV_JOINTS; i++)
+ cm->counts.mv.joints[i] += td->counts->mv.joints[i];
+
+ for (k = 0; k < 2; k++) {
+ nmv_component_counts *comps = &cm->counts.mv.comps[k];
+ nmv_component_counts *comps_t = &td->counts->mv.comps[k];
+
+ for (i = 0; i < 2; i++) {
+ comps->sign[i] += comps_t->sign[i];
+ comps->class0_hp[i] += comps_t->class0_hp[i];
+ comps->hp[i] += comps_t->hp[i];
+ }
+
+ for (i = 0; i < MV_CLASSES; i++)
+ comps->classes[i] += comps_t->classes[i];
+
+ for (i = 0; i < CLASS0_SIZE; i++) {
+ comps->class0[i] += comps_t->class0[i];
+ for (j = 0; j < MV_FP_SIZE; j++)
+ comps->class0_fp[i][j] += comps_t->class0_fp[i][j];
+ }
+
+ for (i = 0; i < MV_OFFSET_BITS; i++)
+ for (j = 0; j < 2; j++)
+ comps->bits[i][j] += comps_t->bits[i][j];
+
+ for (i = 0; i < MV_FP_SIZE; i++)
+ comps->fp[i] += comps_t->fp[i];
+ }
+}
+
+static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
+ int i, j, k, l, m, n;
+
+ for (i = 0; i < REFERENCE_MODES; i++)
+ td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];
+
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
+ td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];
+
+ for (i = 0; i < TX_MODES; i++)
+ td->rd_counts.tx_select_diff[i] += td_t->rd_counts.tx_select_diff[i];
+
+ for (i = 0; i < TX_SIZES; i++)
+ for (j = 0; j < PLANE_TYPES; j++)
+ for (k = 0; k < REF_TYPES; k++)
+ for (l = 0; l < COEF_BANDS; l++)
+ for (m = 0; m < COEFF_CONTEXTS; m++)
+ for (n = 0; n < ENTROPY_TOKENS; n++)
+ td->rd_counts.coef_counts[i][j][k][l][m][n] +=
+ td_t->rd_counts.coef_counts[i][j][k][l][m][n];
+}
+
+static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
+ VP9_COMP *const cpi = thread_data->cpi;
+ const VP9_COMMON *const cm = &cpi->common;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ int t;
+
+ (void) unused;
+
+ for (t = thread_data->start; t < tile_rows * tile_cols;
+ t += cpi->num_workers) {
+ int tile_row = t / tile_cols;
+ int tile_col = t % tile_cols;
+
+ vp9_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+ }
+
+ return 0;
+}
+
+void vp9_encode_tiles_mt(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ const int num_workers = MIN(cpi->oxcf.max_threads, tile_cols);
+ int i;
+
+ vp9_init_tile_data(cpi);
+
+ // Only run once to create threads and allocate thread data.
+ if (cpi->num_workers == 0) {
+ CHECK_MEM_ERROR(cm, cpi->workers,
+ vpx_malloc(num_workers * sizeof(*cpi->workers)));
+
+ for (i = 0; i < num_workers; i++) {
+ VP9Worker *const worker = &cpi->workers[i];
+ EncWorkerData *thread_data;
+
+ ++cpi->num_workers;
+
+ winterface->init(worker);
+ CHECK_MEM_ERROR(cm, worker->data1,
+ (EncWorkerData*)vpx_calloc(1, sizeof(EncWorkerData)));
+ thread_data = (EncWorkerData*)worker->data1;
+
+ if (i < num_workers - 1) {
+ thread_data->cpi = cpi;
+
+ // Allocate thread data.
+ CHECK_MEM_ERROR(cm, thread_data->td,
+ vpx_calloc(1, sizeof(*thread_data->td)));
+ // Set up pc_tree.
+ thread_data->td->leaf_tree = NULL;
+ thread_data->td->pc_tree = NULL;
+ vp9_setup_pc_tree(cm, thread_data->td);
+
+ // Allocate frame counters in thread data.
+ CHECK_MEM_ERROR(cm, thread_data->td->counts,
+ vpx_calloc(1, sizeof(*thread_data->td->counts)));
+
+ // Create threads
+ if (!winterface->reset(worker))
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Tile encoder thread creation failed");
+ } else {
+ // Main thread acts as a worker and uses the thread data in cpi.
+ thread_data->cpi = cpi;
+ thread_data->td = &cpi->td;
+ }
+
+ // data2 is unused.
+ worker->data2 = NULL;
+
+ winterface->sync(worker);
+ worker->hook = (VP9WorkerHook)enc_worker_hook;
+ }
+ }
+
+ for (i = 0; i < num_workers; i++) {
+ VP9Worker *const worker = &cpi->workers[i];
+ EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+
+ // Before encoding a frame, copy the thread data from cpi.
+ thread_data->td->mb = cpi->td.mb;
+ thread_data->td->rd_counts = cpi->td.rd_counts;
+ vpx_memcpy(thread_data->td->counts, &cpi->common.counts,
+ sizeof(cpi->common.counts));
+
+ // Handle use_nonrd_pick_mode case.
+ if (cpi->sf.use_nonrd_pick_mode) {
+ MACROBLOCK *const x = &thread_data->td->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct macroblock_plane *const p = x->plane;
+ struct macroblockd_plane *const pd = xd->plane;
+ PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
+ int j;
+
+ for (j = 0; j < MAX_MB_PLANE; ++j) {
+ p[j].coeff = ctx->coeff_pbuf[j][0];
+ p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
+ pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
+ p[j].eobs = ctx->eobs_pbuf[j][0];
+ }
+ }
+ }
+
+ // Encode a frame
+ for (i = 0; i < num_workers; i++) {
+ VP9Worker *const worker = &cpi->workers[i];
+ EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+
+ // Set the starting tile for each thread.
+ thread_data->start = i;
+
+ if (i == num_workers - 1)
+ winterface->execute(worker);
+ else
+ winterface->launch(worker);
+ }
+
+ // Encoding ends.
+ for (i = 0; i < num_workers; i++) {
+ VP9Worker *const worker = &cpi->workers[i];
+ winterface->sync(worker);
+ }
+
+ for (i = 0; i < num_workers; i++) {
+ VP9Worker *const worker = &cpi->workers[i];
+ EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+
+ // Accumulate counters.
+ if (i < num_workers - 1) {
+ accumulate_frame_counts(&cpi->common, thread_data->td);
+ accumulate_rd_opt(&cpi->td, thread_data->td);
+ }
+ }
+}
diff --git a/vp9/encoder/vp9_ethread.h b/vp9/encoder/vp9_ethread.h
new file mode 100644
index 0000000..e87c50b
--- /dev/null
+++ b/vp9/encoder/vp9_ethread.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_ETHREAD_H_
+#define VP9_ENCODER_VP9_ETHREAD_H_
+
+struct VP9_COMP;
+struct ThreadData;
+
+typedef struct EncWorkerData {
+ struct VP9_COMP *cpi;
+ struct ThreadData *td;
+ int start;
+} EncWorkerData;
+
+void vp9_encode_tiles_mt(struct VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_ETHREAD_H_
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 7f4d0c7..a428f1a 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -2142,7 +2142,7 @@
1, cost_list, fn_ptr, ref_mv, tmp_mv);
break;
default:
- assert(!"Invalid search method.");
+ assert(0 && "Invalid search method.");
}
if (method != NSTEP && rd && var < var_max)
diff --git a/vp9/encoder/vp9_picklpf.c b/vp9/encoder/vp9_picklpf.c
index 5559f8a..81334e4 100644
--- a/vp9/encoder/vp9_picklpf.c
+++ b/vp9/encoder/vp9_picklpf.c
@@ -153,7 +153,7 @@
const int q = vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth);
// These values were determined by linear fitting the result of the
// searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
case VPX_BITS_8:
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 30709e0..1da5a83 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -599,9 +599,7 @@
// initialize mode decisions
vp9_rd_cost_reset(&best_rdc);
- vp9_rd_cost_reset(&this_rdc);
vp9_rd_cost_reset(rd_cost);
- vpx_memset(mbmi, 0, sizeof(MB_MODE_INFO));
mbmi->sb_type = bsize;
mbmi->ref_frame[0] = NONE;
mbmi->ref_frame[1] = NONE;
@@ -688,7 +686,7 @@
if (ref_frame > LAST_FRAME)
continue;
if (cpi->sf.partition_search_type != VAR_BASED_PARTITION &&
- this_rdc.rdcost < (int64_t)(1 << num_pels_log2_lookup[bsize]))
+ best_rdc.rdcost < (int64_t)(1 << num_pels_log2_lookup[bsize]))
continue;
if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
&frame_mv[NEWMV][ref_frame],
@@ -852,9 +850,20 @@
if (reuse_inter_pred && best_pred != NULL) {
if (best_pred->data == orig_dst.buf) {
this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (cm->use_highbitdepth)
+ vp9_highbd_convolve_copy(best_pred->data, best_pred->stride,
+ this_mode_pred->data, this_mode_pred->stride,
+ NULL, 0, NULL, 0, bw, bh, xd->bd);
+ else
+ vp9_convolve_copy(best_pred->data, best_pred->stride,
+ this_mode_pred->data, this_mode_pred->stride,
+ NULL, 0, NULL, 0, bw, bh);
+#else
vp9_convolve_copy(best_pred->data, best_pred->stride,
this_mode_pred->data, this_mode_pred->stride,
NULL, 0, NULL, 0, bw, bh);
+#endif // CONFIG_VP9_HIGHBITDEPTH
best_pred = this_mode_pred;
}
}
@@ -910,7 +919,7 @@
vp9_convolve_copy(best_pred->data, best_pred->stride,
pd->dst.buf, pd->dst.stride, NULL, 0,
NULL, 0, bw, bh);
-#endif
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index e96c904..3cc9d9a 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -426,8 +426,8 @@
}
// Work out a size correction factor.
if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
- correction_factor = (100 * cpi->rc.projected_frame_size) /
- projected_size_based_on_q;
+ correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) /
+ projected_size_based_on_q);
// More heavily damped adjustment used if we have been oscillating either side
// of target.
@@ -1483,6 +1483,12 @@
target = calc_pframe_target_size_one_pass_cbr(cpi);
}
}
+
+ // Any update/change of global cyclic refresh parameters (amount/delta-qp)
+ // should be done here, before the frame qp is selected.
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
+ vp9_cyclic_refresh_update_parameters(cpi);
+
vp9_rc_set_frame_target(cpi, target);
rc->frames_till_gf_update_due = INT_MAX;
rc->baseline_gf_interval = INT_MAX;
@@ -1516,6 +1522,11 @@
rc->gfu_boost = DEFAULT_GF_BOOST;
}
+ // Any update/change of global cyclic refresh parameters (amount/delta-qp)
+ // should be done here, before the frame qp is selected.
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
+ vp9_cyclic_refresh_update_parameters(cpi);
+
if (cm->frame_type == KEY_FRAME)
target = calc_iframe_target_size_one_pass_cbr(cpi);
else
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index f86e21c..600a3eb 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -982,8 +982,8 @@
int i, j;
const MACROBLOCKD *const xd = &mb->e_mbd;
MODE_INFO *const mic = xd->mi[0].src_mi;
- const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
- const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
+ const MODE_INFO *above_mi = xd->above_mi;
+ const MODE_INFO *left_mi = xd->left_mi;
const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
@@ -1058,8 +1058,8 @@
TX_SIZE best_tx = TX_4X4;
int i;
int *bmode_costs;
- const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
- const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
+ const MODE_INFO *above_mi = xd->above_mi;
+ const MODE_INFO *left_mi = xd->left_mi;
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
bmode_costs = cpi->y_mode_costs[A][L];
@@ -1072,6 +1072,16 @@
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
int64_t local_tx_cache[TX_MODES];
+
+ if (cpi->sf.use_nonrd_pick_mode) {
+ // These speed features are turned on in hybrid non-RD and RD mode
+ // for key frame coding in the context of real-time setting.
+ if (conditional_skipintra(mode, mode_selected))
+ continue;
+ if (*skippable)
+ break;
+ }
+
mic->mbmi.mode = mode;
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
diff --git a/vp9/encoder/vp9_speed_features.c b/vp9/encoder/vp9_speed_features.c
index 4a0c797..0775b91 100644
--- a/vp9/encoder/vp9_speed_features.c
+++ b/vp9/encoder/vp9_speed_features.c
@@ -106,7 +106,8 @@
: USE_LARGESTALL;
sf->reference_masking = 1;
- sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
+ FLAG_SKIP_INTRA_DIRMISMATCH |
FLAG_SKIP_INTRA_BESTINTER |
FLAG_SKIP_COMP_BESTINTRA |
FLAG_SKIP_INTRA_LOWVAR;
@@ -140,7 +141,8 @@
sf->mv.search_method = BIGDIA;
sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED_MORE;
sf->adaptive_rd_thresh = 4;
- sf->mode_search_skip_flags |= FLAG_EARLY_TERMINATE;
+ if (cm->frame_type != KEY_FRAME)
+ sf->mode_search_skip_flags |= FLAG_EARLY_TERMINATE;
sf->disable_filter_search_var_thresh = 200;
sf->use_lp32x32fdct = 1;
sf->use_fast_coef_updates = ONE_LOOP_REDUCED;
@@ -226,7 +228,8 @@
}
if (speed >= 2) {
- sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
+ FLAG_SKIP_INTRA_DIRMISMATCH |
FLAG_SKIP_INTRA_BESTINTER |
FLAG_SKIP_COMP_BESTINTRA |
FLAG_SKIP_INTRA_LOWVAR;
@@ -305,6 +308,7 @@
sf->partition_search_breakout_rate_thr = 200;
sf->coeff_prob_appx_step = 4;
sf->use_fast_coef_updates = is_keyframe ? TWO_LOOP : ONE_LOOP_REDUCED;
+ sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH;
if (!is_keyframe) {
int i;
@@ -321,7 +325,6 @@
if (speed >= 6) {
// Adaptively switch between SOURCE_VAR_BASED_PARTITION and FIXED_PARTITION.
sf->partition_search_type = VAR_BASED_PARTITION;
-
// Turn on this to use non-RD key frame coding mode.
sf->use_nonrd_pick_mode = 1;
sf->mv.search_method = NSTEP;
diff --git a/vp9/encoder/x86/vp9_dct32x32_sse2.c b/vp9/encoder/x86/vp9_dct32x32_sse2.c
index 7ec126e..099993a 100644
--- a/vp9/encoder/x86/vp9_dct32x32_sse2.c
+++ b/vp9/encoder/x86/vp9_dct32x32_sse2.c
@@ -269,8 +269,9 @@
step1[30] = SUB_EPI16(in01, in30);
step1[31] = SUB_EPI16(in00, in31);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1[0], step1[1], step1[2],
- step1[3], step1[28], step1[29], step1[30], step1[31]);
+ overflow = check_epi16_overflow_x8(&step1[0], &step1[1], &step1[2],
+ &step1[3], &step1[28], &step1[29],
+ &step1[30], &step1[31]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -295,9 +296,9 @@
step1[26] = SUB_EPI16(in05, in26);
step1[27] = SUB_EPI16(in04, in27);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1[4], step1[5], step1[6],
- step1[7], step1[24], step1[25],
- step1[26], step1[27]);
+ overflow = check_epi16_overflow_x8(&step1[4], &step1[5], &step1[6],
+ &step1[7], &step1[24], &step1[25],
+ &step1[26], &step1[27]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -322,9 +323,9 @@
step1[22] = SUB_EPI16(in09, in22);
step1[23] = SUB_EPI16(in08, in23);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1[8], step1[9], step1[10],
- step1[11], step1[20], step1[21],
- step1[22], step1[23]);
+ overflow = check_epi16_overflow_x8(&step1[8], &step1[9], &step1[10],
+ &step1[11], &step1[20], &step1[21],
+ &step1[22], &step1[23]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -349,9 +350,9 @@
step1[18] = SUB_EPI16(in13, in18);
step1[19] = SUB_EPI16(in12, in19);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1[12], step1[13], step1[14],
- step1[15], step1[16], step1[17],
- step1[18], step1[19]);
+ overflow = check_epi16_overflow_x8(&step1[12], &step1[13], &step1[14],
+ &step1[15], &step1[16], &step1[17],
+ &step1[18], &step1[19]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -379,10 +380,10 @@
step2[15] = SUB_EPI16(step1[0], step1[15]);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x16(
- step2[0], step2[1], step2[2], step2[3],
- step2[4], step2[5], step2[6], step2[7],
- step2[8], step2[9], step2[10], step2[11],
- step2[12], step2[13], step2[14], step2[15]);
+ &step2[0], &step2[1], &step2[2], &step2[3],
+ &step2[4], &step2[5], &step2[6], &step2[7],
+ &step2[8], &step2[9], &step2[10], &step2[11],
+ &step2[12], &step2[13], &step2[14], &step2[15]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -460,9 +461,9 @@
step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step2[20], step2[21], step2[22],
- step2[23], step2[24], step2[25],
- step2[26], step2[27]);
+ overflow = check_epi16_overflow_x8(&step2[20], &step2[21], &step2[22],
+ &step2[23], &step2[24], &step2[25],
+ &step2[26], &step2[27]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -544,14 +545,14 @@
step1[31] = SUB_EPI16(step1[31], s3_31_0);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x32(
- step2[0], step2[1], step2[2], step2[3],
- step2[4], step2[5], step2[6], step2[7],
- step2[8], step2[9], step2[10], step2[11],
- step2[12], step2[13], step2[14], step2[15],
- step1[16], step1[17], step1[18], step1[19],
- step2[20], step2[21], step2[22], step2[23],
- step2[24], step2[25], step2[26], step2[27],
- step1[28], step1[29], step1[30], step1[31]);
+ &step2[0], &step2[1], &step2[2], &step2[3],
+ &step2[4], &step2[5], &step2[6], &step2[7],
+ &step2[8], &step2[9], &step2[10], &step2[11],
+ &step2[12], &step2[13], &step2[14], &step2[15],
+ &step1[16], &step1[17], &step1[18], &step1[19],
+ &step2[20], &step2[21], &step2[22], &step2[23],
+ &step2[24], &step2[25], &step2[26], &step2[27],
+ &step1[28], &step1[29], &step1[30], &step1[31]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -639,9 +640,9 @@
step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step3[0], step3[1], step3[2],
- step3[3], step3[4], step3[5],
- step3[6], step3[7]);
+ overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
+ &step3[3], &step3[4], &step3[5],
+ &step3[6], &step3[7]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -687,8 +688,8 @@
step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(step3[10], step3[11],
- step3[12], step3[13]);
+ overflow = check_epi16_overflow_x4(&step3[10], &step3[11],
+ &step3[12], &step3[13]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -717,10 +718,10 @@
step3[31] = ADD_EPI16(step2[24], step1[31]);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x16(
- step3[16], step3[17], step3[18], step3[19],
- step3[20], step3[21], step3[22], step3[23],
- step3[24], step3[25], step3[26], step3[27],
- step3[28], step3[29], step3[30], step3[31]);
+ &step3[16], &step3[17], &step3[18], &step3[19],
+ &step3[20], &step3[21], &step3[22], &step3[23],
+ &step3[24], &step3[25], &step3[26], &step3[27],
+ &step3[28], &step3[29], &step3[30], &step3[31]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -747,10 +748,10 @@
step1[15] = ADD_EPI16(step3[12], step2[15]);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x16(
- step1[0], step1[1], step1[2], step1[3],
- step1[4], step1[5], step1[6], step1[7],
- step1[8], step1[9], step1[10], step1[11],
- step1[12], step1[13], step1[14], step1[15]);
+ &step1[0], &step1[1], &step1[2], &step1[3],
+ &step1[4], &step1[5], &step1[6], &step1[7],
+ &step1[8], &step1[9], &step1[10], &step1[11],
+ &step1[12], &step1[13], &step1[14], &step1[15]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -780,7 +781,7 @@
step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(step1[5], step1[6]);
+ overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -858,9 +859,9 @@
step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1[18], step1[19], step1[20],
- step1[21], step1[26], step1[27],
- step1[28], step1[29]);
+ overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
+ &step1[21], &step1[26], &step1[27],
+ &step1[28], &step1[29]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -877,8 +878,8 @@
step2[6] = SUB_EPI16(step3[7], step1[6]);
step2[7] = ADD_EPI16(step1[6], step3[7]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(step2[4], step2[5],
- step2[6], step2[7]);
+ overflow = check_epi16_overflow_x4(&step2[4], &step2[5],
+ &step2[6], &step2[7]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -924,7 +925,8 @@
out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
out[24] = _mm_packs_epi32(out_24_6, out_24_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(out[0], out[16], out[8], out[24]);
+ overflow = check_epi16_overflow_x4(&out[0], &out[16],
+ &out[8], &out[24]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -970,8 +972,8 @@
step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(step2[9], step2[10],
- step2[13], step2[14]);
+ overflow = check_epi16_overflow_x4(&step2[9], &step2[10],
+ &step2[13], &step2[14]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1000,10 +1002,10 @@
step2[31] = ADD_EPI16(step1[28], step3[31]);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x16(
- step2[16], step2[17], step2[18], step2[19],
- step2[20], step2[21], step2[22], step2[23],
- step2[24], step2[25], step2[26], step2[27],
- step2[28], step2[29], step2[30], step2[31]);
+ &step2[16], &step2[17], &step2[18], &step2[19],
+ &step2[20], &step2[21], &step2[22], &step2[23],
+ &step2[24], &step2[25], &step2[26], &step2[27],
+ &step2[28], &step2[29], &step2[30], &step2[31]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1054,7 +1056,8 @@
out[12] = _mm_packs_epi32(out_12_6, out_12_7);
out[28] = _mm_packs_epi32(out_28_6, out_28_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(out[4], out[20], out[12], out[28]);
+ overflow = check_epi16_overflow_x4(&out[4], &out[20],
+ &out[12], &out[28]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1074,9 +1077,9 @@
step3[14] = SUB_EPI16(step1[15], step2[14]);
step3[15] = ADD_EPI16(step2[14], step1[15]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step3[8], step3[9], step3[10],
- step3[11], step3[12], step3[13],
- step3[14], step3[15]);
+ overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
+ &step3[11], &step3[12], &step3[13],
+ &step3[14], &step3[15]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1155,9 +1158,9 @@
step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step3[17], step3[18], step3[21],
- step3[22], step3[25], step3[26],
- step3[29], step3[30]);
+ overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
+ &step3[22], &step3[25], &step3[26],
+ &step3[29], &step3[30]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1236,8 +1239,9 @@
out[14] = _mm_packs_epi32(out_14_6, out_14_7);
out[30] = _mm_packs_epi32(out_30_6, out_30_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[2], out[18], out[10], out[26],
- out[6], out[22], out[14], out[30]);
+ overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
+ &out[26], &out[6], &out[22],
+ &out[14], &out[30]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1266,10 +1270,10 @@
step1[31] = ADD_EPI16(step3[30], step2[31]);
#if DCT_HIGH_BIT_DEPTH
overflow = check_epi16_overflow_x16(
- step1[16], step1[17], step1[18], step1[19],
- step1[20], step1[21], step1[22], step1[23],
- step1[24], step1[25], step1[26], step1[27],
- step1[28], step1[29], step1[30], step1[31]);
+ &step1[16], &step1[17], &step1[18], &step1[19],
+ &step1[20], &step1[21], &step1[22], &step1[23],
+ &step1[24], &step1[25], &step1[26], &step1[27],
+ &step1[28], &step1[29], &step1[30], &step1[31]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1348,8 +1352,9 @@
out[15] = _mm_packs_epi32(out_15_6, out_15_7);
out[31] = _mm_packs_epi32(out_31_6, out_31_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[1], out[17], out[9], out[25],
- out[7], out[23], out[15], out[31]);
+ overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
+ &out[25], &out[7], &out[23],
+ &out[15], &out[31]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1427,8 +1432,9 @@
out[11] = _mm_packs_epi32(out_11_6, out_11_7);
out[27] = _mm_packs_epi32(out_27_6, out_27_7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[5], out[21], out[13], out[29],
- out[3], out[19], out[11], out[27]);
+ overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
+ &out[29], &out[3], &out[19],
+ &out[11], &out[27]);
if (overflow) {
if (pass == 0)
HIGH_FDCT32x32_2D_C(input, output_org, stride);
@@ -1697,8 +1703,8 @@
v[6] = k_madd_epi32(u[2], k32_p16_p16);
v[7] = k_madd_epi32(u[3], k32_p16_p16);
#if DCT_HIGH_BIT_DEPTH
- overflow = k_check_epi32_overflow_8(v[0], v[1], v[2], v[3], v[4], v[5],
- v[6], v[7], &kZero);
+ overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
+ &v[4], &v[5], &v[6], &v[7], &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -1776,10 +1782,11 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_32(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15],
- v[16], v[17], v[18], v[19], v[20], v[21], v[22], v[23],
- v[24], v[25], v[26], v[27], v[28], v[29], v[30], v[31], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+ &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -1883,8 +1890,9 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_16(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -1959,7 +1967,8 @@
out[ 8] = _mm_packs_epi32(u[4], u[5]);
out[24] = _mm_packs_epi32(u[6], u[7]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(out[0], out[16], out[8], out[24]);
+ overflow = check_epi16_overflow_x4(&out[0], &out[16],
+ &out[8], &out[24]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -1999,8 +2008,9 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_16(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2110,8 +2120,9 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_16(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2185,7 +2196,8 @@
out[12] = _mm_packs_epi32(u[4], u[5]);
out[28] = _mm_packs_epi32(u[6], u[7]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(out[4], out[20], out[12], out[28]);
+ overflow = check_epi16_overflow_x4(&out[4], &out[20],
+ &out[12], &out[28]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2271,10 +2283,11 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_32(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15],
- v[16], v[17], v[18], v[19], v[20], v[21], v[22], v[23],
- v[24], v[25], v[26], v[27], v[28], v[29], v[30], v[31], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+ &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2394,10 +2407,11 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_32(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15],
- v[16], v[17], v[18], v[19], v[20], v[21], v[22], v[23],
- v[24], v[25], v[26], v[27], v[28], v[29], v[30], v[31], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+ &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2531,8 +2545,9 @@
out[14] = _mm_packs_epi32(u[12], u[13]);
out[30] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[2], out[18], out[10], out[26],
- out[6], out[22], out[14], out[30]);
+ overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
+ &out[26], &out[6], &out[22],
+ &out[14], &out[30]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2636,10 +2651,11 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_32(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15],
- v[16], v[17], v[18], v[19], v[20], v[21], v[22], v[23],
- v[24], v[25], v[26], v[27], v[28], v[29], v[30], v[31], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+ &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2773,8 +2789,9 @@
out[15] = _mm_packs_epi32(u[12], u[13]);
out[31] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[1], out[17], out[9], out[25],
- out[7], out[23], out[15], out[31]);
+ overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
+ &out[25], &out[7], &out[23],
+ &out[15], &out[31]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2843,10 +2860,11 @@
#if DCT_HIGH_BIT_DEPTH
overflow = k_check_epi32_overflow_32(
- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15],
- v[16], v[17], v[18], v[19], v[20], v[21], v[22], v[23],
- v[24], v[25], v[26], v[27], v[28], v[29], v[30], v[31], &kZero);
+ &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+ &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+ &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+ &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+ &kZero);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -2980,8 +2998,9 @@
out[11] = _mm_packs_epi32(u[12], u[13]);
out[27] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(out[5], out[21], out[13], out[29],
- out[3], out[19], out[11], out[27]);
+ overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
+ &out[29], &out[3], &out[19],
+ &out[11], &out[27]);
if (overflow) {
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
return;
@@ -3107,14 +3126,14 @@
// Process next 8x8
output0 += 8;
} else {
- storeu_output(tr2_0, (output1 + 0 * 32));
- storeu_output(tr2_1, (output1 + 1 * 32));
- storeu_output(tr2_2, (output1 + 2 * 32));
- storeu_output(tr2_3, (output1 + 3 * 32));
- storeu_output(tr2_4, (output1 + 4 * 32));
- storeu_output(tr2_5, (output1 + 5 * 32));
- storeu_output(tr2_6, (output1 + 6 * 32));
- storeu_output(tr2_7, (output1 + 7 * 32));
+ storeu_output(&tr2_0, (output1 + 0 * 32));
+ storeu_output(&tr2_1, (output1 + 1 * 32));
+ storeu_output(&tr2_2, (output1 + 2 * 32));
+ storeu_output(&tr2_3, (output1 + 3 * 32));
+ storeu_output(&tr2_4, (output1 + 4 * 32));
+ storeu_output(&tr2_5, (output1 + 5 * 32));
+ storeu_output(&tr2_6, (output1 + 6 * 32));
+ storeu_output(&tr2_7, (output1 + 7 * 32));
// Process next 8x8
output1 += 8;
}
diff --git a/vp9/encoder/x86/vp9_dct_impl_sse2.c b/vp9/encoder/x86/vp9_dct_impl_sse2.c
index 3fdde83..12fa747 100644
--- a/vp9/encoder/x86/vp9_dct_impl_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_impl_sse2.c
@@ -75,7 +75,7 @@
// This second rounding constant saves doing some extra adds at the end
const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING
+(DCT_CONST_ROUNDING << 1));
- const int DCT_CONST_BITS2 = DCT_CONST_BITS+2;
+ const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2;
const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
__m128i in0, in1;
@@ -170,7 +170,7 @@
const __m128i x0 = _mm_packs_epi32(w0, w1);
const __m128i x1 = _mm_packs_epi32(w2, w3);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(x0, x1);
+ overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
vp9_highbd_fdct4x4_c(input, output, stride);
return;
@@ -192,7 +192,7 @@
// t0 = [c0 c1 c8 c9 c4 c5 cC cD]
// t1 = [c3 c2 cB cA -c7 -c6 -cF -cE]
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(t0, t1);
+ overflow = check_epi16_overflow_x2(&t0, &t1);
if (overflow) {
vp9_highbd_fdct4x4_c(input, output, stride);
return;
@@ -231,7 +231,7 @@
const __m128i x0 = _mm_packs_epi32(w0, w1);
const __m128i x1 = _mm_packs_epi32(w2, w3);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(x0, x1);
+ overflow = check_epi16_overflow_x2(&x0, &x1);
if (overflow) {
vp9_highbd_fdct4x4_c(input, output, stride);
return;
@@ -254,8 +254,8 @@
// Post-condition (v + 1) >> 2 is now incorporated into previous
// add and right-shift commands. Only 2 store instructions needed
// because we are using the fact that 1/3 are stored just after 0/2.
- storeu_output(in0, output + 0 * 4);
- storeu_output(in1, output + 2 * 4);
+ storeu_output(&in0, output + 0 * 4);
+ storeu_output(&in1, output + 2 * 4);
}
@@ -314,7 +314,8 @@
const __m128i q7 = SUB_EPI16(in0, in7);
#if DCT_HIGH_BIT_DEPTH
if (pass == 1) {
- overflow = check_epi16_overflow_x8(q0, q1, q2, q3, q4, q5, q6, q7);
+ overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
+ &q4, &q5, &q6, &q7);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -329,7 +330,7 @@
const __m128i r2 = SUB_EPI16(q1, q2);
const __m128i r3 = SUB_EPI16(q0, q3);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(r0, r1, r2, r3);
+ overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -372,7 +373,7 @@
res2 = _mm_packs_epi32(w4, w5);
res6 = _mm_packs_epi32(w6, w7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res0, res4, res2, res6);
+ overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -402,7 +403,7 @@
const __m128i r0 = _mm_packs_epi32(s0, s1);
const __m128i r1 = _mm_packs_epi32(s2, s3);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(r0, r1);
+ overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -415,7 +416,7 @@
const __m128i x2 = SUB_EPI16(q7, r1);
const __m128i x3 = ADD_EPI16(q7, r1);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(x0, x1, x2, x3);
+ overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -458,7 +459,7 @@
res5 = _mm_packs_epi32(w4, w5);
res3 = _mm_packs_epi32(w6, w7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res1, res7, res5, res3);
+ overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
if (overflow) {
vp9_highbd_fdct8x8_c(input, output, stride);
return;
@@ -557,14 +558,14 @@
in6 = _mm_srai_epi16(in6, 1);
in7 = _mm_srai_epi16(in7, 1);
// store results
- store_output(in0, (output + 0 * 8));
- store_output(in1, (output + 1 * 8));
- store_output(in2, (output + 2 * 8));
- store_output(in3, (output + 3 * 8));
- store_output(in4, (output + 4 * 8));
- store_output(in5, (output + 5 * 8));
- store_output(in6, (output + 6 * 8));
- store_output(in7, (output + 7 * 8));
+ store_output(&in0, (output + 0 * 8));
+ store_output(&in1, (output + 1 * 8));
+ store_output(&in2, (output + 2 * 8));
+ store_output(&in3, (output + 3 * 8));
+ store_output(&in4, (output + 4 * 8));
+ store_output(&in5, (output + 5 * 8));
+ store_output(&in6, (output + 6 * 8));
+ store_output(&in7, (output + 7 * 8));
}
}
@@ -720,8 +721,8 @@
input6 = ADD_EPI16(in06, in09);
input7 = ADD_EPI16(in07, in08);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(input0, input1, input2, input3,
- input4, input5, input6, input7);
+ overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
+ &input4, &input5, &input6, &input7);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -739,8 +740,10 @@
step1_6 = SUB_EPI16(in01, in14);
step1_7 = SUB_EPI16(in00, in15);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1_0, step1_1, step1_2, step1_3,
- step1_4, step1_5, step1_6, step1_7);
+ overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
+ &step1_2, &step1_3,
+ &step1_4, &step1_5,
+ &step1_6, &step1_7);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -759,7 +762,8 @@
const __m128i q6 = SUB_EPI16(input1, input6);
const __m128i q7 = SUB_EPI16(input0, input7);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(q0, q1, q2, q3, q4, q5, q6, q7);
+ overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3,
+ &q4, &q5, &q6, &q7);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -773,7 +777,7 @@
const __m128i r2 = SUB_EPI16(q1, q2);
const __m128i r3 = SUB_EPI16(q0, q3);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(r0, r1, r2, r3);
+ overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -786,16 +790,16 @@
const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
- res00 = mult_round_shift(t0, t1, k__cospi_p16_p16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res08 = mult_round_shift(t0, t1, k__cospi_p16_m16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res04 = mult_round_shift(t2, t3, k__cospi_p24_p08,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res12 = mult_round_shift(t2, t3, k__cospi_m08_p24,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res00 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res08 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res04 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res12 = mult_round_shift(&t2, &t3, &k__cospi_m08_p24,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res00, res08, res04, res12);
+ overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -809,12 +813,14 @@
// into 32 bits.
const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
- const __m128i r0 = mult_round_shift(d0, d1, k__cospi_p16_m16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- const __m128i r1 = mult_round_shift(d0, d1, k__cospi_p16_p16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ const __m128i r0 = mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
+ &k__DCT_CONST_ROUNDING,
+ DCT_CONST_BITS);
+ const __m128i r1 = mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
+ &k__DCT_CONST_ROUNDING,
+ DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x2(r0, r1);
+ overflow = check_epi16_overflow_x2(&r0, &r1);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -827,7 +833,7 @@
const __m128i x2 = SUB_EPI16(q7, r1);
const __m128i x3 = ADD_EPI16(q7, r1);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(x0, x1, x2, x3);
+ overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -840,16 +846,17 @@
const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
- res02 = mult_round_shift(t0, t1, k__cospi_p28_p04,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res14 = mult_round_shift(t0, t1, k__cospi_m04_p28,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res10 = mult_round_shift(t2, t3, k__cospi_p12_p20,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res06 = mult_round_shift(t2, t3, k__cospi_m20_p12,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res02 = mult_round_shift(&t0, &t1, &k__cospi_p28_p04,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res14 = mult_round_shift(&t0, &t1, &k__cospi_m04_p28,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res10 = mult_round_shift(&t2, &t3, &k__cospi_p12_p20,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res02, res14, res10, res06);
+ overflow = check_epi16_overflow_x4(&res02, &res14,
+ &res10, &res06);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -867,17 +874,17 @@
const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
- step2_2 = mult_round_shift(t0, t1, k__cospi_p16_m16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_3 = mult_round_shift(t2, t3, k__cospi_p16_m16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_5 = mult_round_shift(t0, t1, k__cospi_p16_p16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_4 = mult_round_shift(t2, t3, k__cospi_p16_p16,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_2 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_3 = mult_round_shift(&t2, &t3, &k__cospi_p16_m16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_5 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(step2_2, step2_3, step2_5,
- step2_4);
+ overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5,
+ &step2_4);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -895,8 +902,10 @@
step3_6 = ADD_EPI16(step1_6, step2_5);
step3_7 = ADD_EPI16(step1_7, step2_4);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step3_0, step3_1, step3_2, step3_3,
- step3_4, step3_5, step3_6, step3_7);
+ overflow = check_epi16_overflow_x8(&step3_0, &step3_1,
+ &step3_2, &step3_3,
+ &step3_4, &step3_5,
+ &step3_6, &step3_7);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -909,17 +918,17 @@
const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
- step2_1 = mult_round_shift(t0, t1, k__cospi_m08_p24,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_2 = mult_round_shift(t2, t3, k__cospi_p24_p08,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_6 = mult_round_shift(t0, t1, k__cospi_p24_p08,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- step2_5 = mult_round_shift(t2, t3, k__cospi_p08_m24,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_1 = mult_round_shift(&t0, &t1, &k__cospi_m08_p24,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_2 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_6 = mult_round_shift(&t0, &t1, &k__cospi_p24_p08,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(step2_1, step2_2, step2_6,
- step2_5);
+ overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6,
+ &step2_5);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -937,8 +946,10 @@
step1_6 = SUB_EPI16(step3_7, step2_6);
step1_7 = ADD_EPI16(step3_7, step2_6);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x8(step1_0, step1_1, step1_2, step1_3,
- step1_4, step1_5, step1_6, step1_7);
+ overflow = check_epi16_overflow_x8(&step1_0, &step1_1,
+ &step1_2, &step1_3,
+ &step1_4, &step1_5,
+ &step1_6, &step1_7);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -951,16 +962,16 @@
const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
- res01 = mult_round_shift(t0, t1, k__cospi_p30_p02,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res09 = mult_round_shift(t2, t3, k__cospi_p14_p18,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res15 = mult_round_shift(t0, t1, k__cospi_m02_p30,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res07 = mult_round_shift(t2, t3, k__cospi_m18_p14,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res01 = mult_round_shift(&t0, &t1, &k__cospi_p30_p02,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res09 = mult_round_shift(&t2, &t3, &k__cospi_p14_p18,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res15 = mult_round_shift(&t0, &t1, &k__cospi_m02_p30,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res07 = mult_round_shift(&t2, &t3, &k__cospi_m18_p14,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res01, res09, res15, res07);
+ overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -972,16 +983,16 @@
const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
- res05 = mult_round_shift(t0, t1, k__cospi_p22_p10,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res13 = mult_round_shift(t2, t3, k__cospi_p06_p26,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res11 = mult_round_shift(t0, t1, k__cospi_m10_p22,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
- res03 = mult_round_shift(t2, t3, k__cospi_m26_p06,
- k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res05 = mult_round_shift(&t0, &t1, &k__cospi_p22_p10,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res13 = mult_round_shift(&t2, &t3, &k__cospi_p06_p26,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res11 = mult_round_shift(&t0, &t1, &k__cospi_m10_p22,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
+ res03 = mult_round_shift(&t2, &t3, &k__cospi_m26_p06,
+ &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
#if DCT_HIGH_BIT_DEPTH
- overflow = check_epi16_overflow_x4(res05, res13, res11, res03);
+ overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
if (overflow) {
vp9_highbd_fdct16x16_c(input, output, stride);
return;
@@ -990,11 +1001,11 @@
}
}
// Transpose the results, do it as two 8x8 transposes.
- transpose_and_output8x8(res00, res01, res02, res03,
- res04, res05, res06, res07,
+ transpose_and_output8x8(&res00, &res01, &res02, &res03,
+ &res04, &res05, &res06, &res07,
pass, out0, out1);
- transpose_and_output8x8(res08, res09, res10, res11,
- res12, res13, res14, res15,
+ transpose_and_output8x8(&res08, &res09, &res10, &res11,
+ &res12, &res13, &res14, &res15,
pass, out0 + 8, out1 + 8);
if (pass == 0) {
out0 += 8*16;
diff --git a/vp9/encoder/x86/vp9_dct_sse2.c b/vp9/encoder/x86/vp9_dct_sse2.c
index 81da343..e671f39 100644
--- a/vp9/encoder/x86/vp9_dct_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_sse2.c
@@ -40,7 +40,7 @@
in1 = _mm_add_epi32(tmp, in0);
in0 = _mm_slli_epi32(in1, 1);
- store_output(in0, output);
+ store_output(&in0, output);
}
static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
@@ -72,8 +72,8 @@
__m128i out23 = _mm_add_epi16(in23, kOne);
out01 = _mm_srai_epi16(out01, 2);
out23 = _mm_srai_epi16(out23, 2);
- store_output(out01, (output + 0 * 8));
- store_output(out23, (output + 1 * 8));
+ store_output(&out01, (output + 0 * 8));
+ store_output(&out23, (output + 1 * 8));
}
static INLINE void transpose_4x4(__m128i *res) {
@@ -245,7 +245,7 @@
in0 = _mm_srli_si128(sum, 8);
in1 = _mm_add_epi32(sum, in0);
- store_output(in1, output);
+ store_output(&in1, output);
}
void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
@@ -759,14 +759,14 @@
// write 8x8 array
static INLINE void write_buffer_8x8(tran_low_t *output, __m128i *res,
int stride) {
- store_output(res[0], (output + 0 * stride));
- store_output(res[1], (output + 1 * stride));
- store_output(res[2], (output + 2 * stride));
- store_output(res[3], (output + 3 * stride));
- store_output(res[4], (output + 4 * stride));
- store_output(res[5], (output + 5 * stride));
- store_output(res[6], (output + 6 * stride));
- store_output(res[7], (output + 7 * stride));
+ store_output(&res[0], (output + 0 * stride));
+ store_output(&res[1], (output + 1 * stride));
+ store_output(&res[2], (output + 2 * stride));
+ store_output(&res[3], (output + 3 * stride));
+ store_output(&res[4], (output + 4 * stride));
+ store_output(&res[5], (output + 5 * stride));
+ store_output(&res[6], (output + 6 * stride));
+ store_output(&res[7], (output + 7 * stride));
}
// perform in-place transpose
@@ -1292,7 +1292,7 @@
in1 = _mm_add_epi32(sum, in0);
in1 = _mm_srai_epi32(in1, 1);
- store_output(in1, output);
+ store_output(&in1, output);
}
static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
@@ -2251,7 +2251,7 @@
in1 = _mm_add_epi32(sum, in0);
in1 = _mm_srai_epi32(in1, 3);
- store_output(in1, output);
+ store_output(&in1, output);
}
#if CONFIG_VP9_HIGHBITDEPTH
diff --git a/vp9/encoder/x86/vp9_dct_sse2.h b/vp9/encoder/x86/vp9_dct_sse2.h
index 2d32210..b99db92 100644
--- a/vp9/encoder/x86/vp9_dct_sse2.h
+++ b/vp9/encoder/x86/vp9_dct_sse2.h
@@ -43,99 +43,144 @@
return _mm_unpacklo_epi64(buf0, buf1);
}
-static INLINE int check_epi16_overflow_x2(__m128i reg0, __m128i reg1) {
+static INLINE int check_epi16_overflow_x2(const __m128i *preg0,
+ const __m128i *preg1) {
const __m128i max_overflow = _mm_set1_epi16(0x7fff);
const __m128i min_overflow = _mm_set1_epi16(0x8000);
- __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(reg0, max_overflow),
- _mm_cmpeq_epi16(reg0, min_overflow));
- __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(reg1, max_overflow),
- _mm_cmpeq_epi16(reg1, min_overflow));
+ __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow),
+ _mm_cmpeq_epi16(*preg0, min_overflow));
+ __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow),
+ _mm_cmpeq_epi16(*preg1, min_overflow));
cmp0 = _mm_or_si128(cmp0, cmp1);
return _mm_movemask_epi8(cmp0);
}
-static INLINE int check_epi16_overflow_x4(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3) {
+static INLINE int check_epi16_overflow_x4(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3) {
const __m128i max_overflow = _mm_set1_epi16(0x7fff);
const __m128i min_overflow = _mm_set1_epi16(0x8000);
- __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(reg0, max_overflow),
- _mm_cmpeq_epi16(reg0, min_overflow));
- __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(reg1, max_overflow),
- _mm_cmpeq_epi16(reg1, min_overflow));
- __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(reg2, max_overflow),
- _mm_cmpeq_epi16(reg2, min_overflow));
- __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(reg3, max_overflow),
- _mm_cmpeq_epi16(reg3, min_overflow));
+ __m128i cmp0 = _mm_or_si128(_mm_cmpeq_epi16(*preg0, max_overflow),
+ _mm_cmpeq_epi16(*preg0, min_overflow));
+ __m128i cmp1 = _mm_or_si128(_mm_cmpeq_epi16(*preg1, max_overflow),
+ _mm_cmpeq_epi16(*preg1, min_overflow));
+ __m128i cmp2 = _mm_or_si128(_mm_cmpeq_epi16(*preg2, max_overflow),
+ _mm_cmpeq_epi16(*preg2, min_overflow));
+ __m128i cmp3 = _mm_or_si128(_mm_cmpeq_epi16(*preg3, max_overflow),
+ _mm_cmpeq_epi16(*preg3, min_overflow));
cmp0 = _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3));
return _mm_movemask_epi8(cmp0);
}
-static INLINE int check_epi16_overflow_x8(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3,
- __m128i reg4, __m128i reg5,
- __m128i reg6, __m128i reg7) {
+static INLINE int check_epi16_overflow_x8(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7) {
int res0, res1;
- res0 = check_epi16_overflow_x4(reg0, reg1, reg2, reg3);
- res1 = check_epi16_overflow_x4(reg4, reg5, reg6, reg7);
+ res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
+ res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
return res0 + res1;
}
-static INLINE int check_epi16_overflow_x12(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3, __m128i reg4,
- __m128i reg5, __m128i reg6, __m128i reg7,
- __m128i reg8, __m128i reg9, __m128i reg10,
- __m128i reg11) {
+static INLINE int check_epi16_overflow_x12(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *preg8,
+ const __m128i *preg9,
+ const __m128i *preg10,
+ const __m128i *preg11) {
int res0, res1;
- res0 = check_epi16_overflow_x4(reg0, reg1, reg2, reg3);
- res1 = check_epi16_overflow_x4(reg4, reg5, reg6, reg7);
+ res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
+ res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
if (!res0)
- res0 = check_epi16_overflow_x4(reg8, reg9, reg10, reg11);
+ res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
return res0 + res1;
}
-static INLINE int check_epi16_overflow_x16(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3, __m128i reg4,
- __m128i reg5, __m128i reg6, __m128i reg7,
- __m128i reg8, __m128i reg9, __m128i reg10,
- __m128i reg11, __m128i reg12, __m128i reg13,
- __m128i reg14, __m128i reg15) {
+static INLINE int check_epi16_overflow_x16(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *preg8,
+ const __m128i *preg9,
+ const __m128i *preg10,
+ const __m128i *preg11,
+ const __m128i *preg12,
+ const __m128i *preg13,
+ const __m128i *preg14,
+ const __m128i *preg15) {
int res0, res1;
- res0 = check_epi16_overflow_x4(reg0, reg1, reg2, reg3);
- res1 = check_epi16_overflow_x4(reg4, reg5, reg6, reg7);
+ res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
+ res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
if (!res0) {
- res0 = check_epi16_overflow_x4(reg8, reg9, reg10, reg11);
+ res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
if (!res1)
- res1 = check_epi16_overflow_x4(reg12, reg13, reg14, reg15);
+ res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15);
}
return res0 + res1;
}
-static INLINE int check_epi16_overflow_x32(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3, __m128i reg4,
- __m128i reg5, __m128i reg6, __m128i reg7,
- __m128i reg8, __m128i reg9, __m128i reg10,
- __m128i reg11, __m128i reg12, __m128i reg13,
- __m128i reg14, __m128i reg15, __m128i reg16,
- __m128i reg17, __m128i reg18, __m128i reg19,
- __m128i reg20, __m128i reg21, __m128i reg22,
- __m128i reg23, __m128i reg24, __m128i reg25,
- __m128i reg26, __m128i reg27, __m128i reg28,
- __m128i reg29, __m128i reg30, __m128i reg31) {
+static INLINE int check_epi16_overflow_x32(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *preg8,
+ const __m128i *preg9,
+ const __m128i *preg10,
+ const __m128i *preg11,
+ const __m128i *preg12,
+ const __m128i *preg13,
+ const __m128i *preg14,
+ const __m128i *preg15,
+ const __m128i *preg16,
+ const __m128i *preg17,
+ const __m128i *preg18,
+ const __m128i *preg19,
+ const __m128i *preg20,
+ const __m128i *preg21,
+ const __m128i *preg22,
+ const __m128i *preg23,
+ const __m128i *preg24,
+ const __m128i *preg25,
+ const __m128i *preg26,
+ const __m128i *preg27,
+ const __m128i *preg28,
+ const __m128i *preg29,
+ const __m128i *preg30,
+ const __m128i *preg31) {
int res0, res1;
- res0 = check_epi16_overflow_x4(reg0, reg1, reg2, reg3);
- res1 = check_epi16_overflow_x4(reg4, reg5, reg6, reg7);
+ res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3);
+ res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7);
if (!res0) {
- res0 = check_epi16_overflow_x4(reg8, reg9, reg10, reg11);
+ res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11);
if (!res1) {
- res1 = check_epi16_overflow_x4(reg12, reg13, reg14, reg15);
+ res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15);
if (!res0) {
- res0 = check_epi16_overflow_x4(reg16, reg17, reg18, reg19);
+ res0 = check_epi16_overflow_x4(preg16, preg17, preg18, preg19);
if (!res1) {
- res1 = check_epi16_overflow_x4(reg20, reg21, reg22, reg23);
+ res1 = check_epi16_overflow_x4(preg20, preg21, preg22, preg23);
if (!res0) {
- res0 = check_epi16_overflow_x4(reg24, reg25, reg26, reg27);
+ res0 = check_epi16_overflow_x4(preg24, preg25, preg26, preg27);
if (!res1)
- res1 = check_epi16_overflow_x4(reg28, reg29, reg30, reg31);
+ res1 = check_epi16_overflow_x4(preg28, preg29, preg30, preg31);
}
}
}
@@ -144,14 +189,17 @@
return res0 + res1;
}
-static INLINE int k_check_epi32_overflow_4(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3, const __m128i* zero) {
+static INLINE int k_check_epi32_overflow_4(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *zero) {
__m128i minus_one = _mm_set1_epi32(-1);
// Check for overflows
- __m128i reg0_shifted = _mm_slli_epi64(reg0, 1);
- __m128i reg1_shifted = _mm_slli_epi64(reg1, 1);
- __m128i reg2_shifted = _mm_slli_epi64(reg2, 1);
- __m128i reg3_shifted = _mm_slli_epi64(reg3, 1);
+ __m128i reg0_shifted = _mm_slli_epi64(*preg0, 1);
+ __m128i reg1_shifted = _mm_slli_epi64(*preg1, 1);
+ __m128i reg2_shifted = _mm_slli_epi64(*preg2, 1);
+ __m128i reg3_shifted = _mm_slli_epi64(*preg3, 1);
__m128i reg0_top_dwords = _mm_shuffle_epi32(
reg0_shifted, _MM_SHUFFLE(0, 0, 3, 1));
__m128i reg1_top_dwords = _mm_shuffle_epi32(
@@ -173,65 +221,107 @@
return (overflow_01 + overflow_23);
}
-static INLINE int k_check_epi32_overflow_8(__m128i reg0, __m128i reg1,
- __m128i reg2, __m128i reg3,
- __m128i reg4, __m128i reg5,
- __m128i reg6, __m128i reg7,
- const __m128i* zero) {
- int overflow = k_check_epi32_overflow_4(reg0, reg1, reg2, reg3, zero);
+static INLINE int k_check_epi32_overflow_8(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *zero) {
+ int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg4, reg5, reg6, reg7, zero);
+ overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
}
return overflow;
}
-static INLINE int k_check_epi32_overflow_16(
- __m128i reg0, __m128i reg1, __m128i reg2, __m128i reg3,
- __m128i reg4, __m128i reg5, __m128i reg6, __m128i reg7,
- __m128i reg8, __m128i reg9, __m128i reg10, __m128i reg11,
- __m128i reg12, __m128i reg13, __m128i reg14, __m128i reg15,
- const __m128i* zero) {
- int overflow = k_check_epi32_overflow_4(reg0, reg1, reg2, reg3, zero);
+static INLINE int k_check_epi32_overflow_16(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *preg8,
+ const __m128i *preg9,
+ const __m128i *preg10,
+ const __m128i *preg11,
+ const __m128i *preg12,
+ const __m128i *preg13,
+ const __m128i *preg14,
+ const __m128i *preg15,
+ const __m128i *zero) {
+ int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg4, reg5, reg6, reg7, zero);
+ overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg8, reg9, reg10, reg11, zero);
+ overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11,
+ zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg12, reg13, reg14, reg15, zero);
+ overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15,
+ zero);
}
}
}
return overflow;
}
-static INLINE int k_check_epi32_overflow_32(
- __m128i reg0, __m128i reg1, __m128i reg2, __m128i reg3,
- __m128i reg4, __m128i reg5, __m128i reg6, __m128i reg7,
- __m128i reg8, __m128i reg9, __m128i reg10, __m128i reg11,
- __m128i reg12, __m128i reg13, __m128i reg14, __m128i reg15,
- __m128i reg16, __m128i reg17, __m128i reg18, __m128i reg19,
- __m128i reg20, __m128i reg21, __m128i reg22, __m128i reg23,
- __m128i reg24, __m128i reg25, __m128i reg26, __m128i reg27,
- __m128i reg28, __m128i reg29, __m128i reg30, __m128i reg31,
- const __m128i* zero) {
- int overflow = k_check_epi32_overflow_4(reg0, reg1, reg2, reg3, zero);
+static INLINE int k_check_epi32_overflow_32(const __m128i *preg0,
+ const __m128i *preg1,
+ const __m128i *preg2,
+ const __m128i *preg3,
+ const __m128i *preg4,
+ const __m128i *preg5,
+ const __m128i *preg6,
+ const __m128i *preg7,
+ const __m128i *preg8,
+ const __m128i *preg9,
+ const __m128i *preg10,
+ const __m128i *preg11,
+ const __m128i *preg12,
+ const __m128i *preg13,
+ const __m128i *preg14,
+ const __m128i *preg15,
+ const __m128i *preg16,
+ const __m128i *preg17,
+ const __m128i *preg18,
+ const __m128i *preg19,
+ const __m128i *preg20,
+ const __m128i *preg21,
+ const __m128i *preg22,
+ const __m128i *preg23,
+ const __m128i *preg24,
+ const __m128i *preg25,
+ const __m128i *preg26,
+ const __m128i *preg27,
+ const __m128i *preg28,
+ const __m128i *preg29,
+ const __m128i *preg30,
+ const __m128i *preg31,
+ const __m128i *zero) {
+ int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg4, reg5, reg6, reg7, zero);
+ overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg8, reg9, reg10, reg11, zero);
+ overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg12, reg13, reg14, reg15, zero);
+ overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15,
+ zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg16, reg17, reg18, reg19, zero);
+ overflow = k_check_epi32_overflow_4(preg16, preg17, preg18, preg19,
+ zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg20, reg21,
- reg22, reg23, zero);
+ overflow = k_check_epi32_overflow_4(preg20, preg21,
+ preg22, preg23, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg24, reg25,
- reg26, reg27, zero);
+ overflow = k_check_epi32_overflow_4(preg24, preg25,
+ preg26, preg27, zero);
if (!overflow) {
- overflow = k_check_epi32_overflow_4(reg28, reg29,
- reg30, reg31, zero);
+ overflow = k_check_epi32_overflow_4(preg28, preg29,
+ preg30, preg31, zero);
}
}
}
@@ -242,51 +332,52 @@
return overflow;
}
-static INLINE void store_output(const __m128i output, tran_low_t* dst_ptr) {
+static INLINE void store_output(const __m128i *poutput, tran_low_t* dst_ptr) {
#if CONFIG_VP9_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
- const __m128i sign_bits = _mm_cmplt_epi16(output, zero);
- __m128i out0 = _mm_unpacklo_epi16(output, sign_bits);
- __m128i out1 = _mm_unpackhi_epi16(output, sign_bits);
+ const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
+ __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
+ __m128i out1 = _mm_unpackhi_epi16(*poutput, sign_bits);
_mm_store_si128((__m128i *)(dst_ptr), out0);
_mm_store_si128((__m128i *)(dst_ptr + 4), out1);
#else
- _mm_store_si128((__m128i *)(dst_ptr), output);
+ _mm_store_si128((__m128i *)(dst_ptr), *poutput);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
-static INLINE void storeu_output(const __m128i output, tran_low_t* dst_ptr) {
+static INLINE void storeu_output(const __m128i *poutput, tran_low_t* dst_ptr) {
#if CONFIG_VP9_HIGHBITDEPTH
const __m128i zero = _mm_setzero_si128();
- const __m128i sign_bits = _mm_cmplt_epi16(output, zero);
- __m128i out0 = _mm_unpacklo_epi16(output, sign_bits);
- __m128i out1 = _mm_unpackhi_epi16(output, sign_bits);
+ const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
+ __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
+ __m128i out1 = _mm_unpackhi_epi16(*poutput, sign_bits);
_mm_storeu_si128((__m128i *)(dst_ptr), out0);
_mm_storeu_si128((__m128i *)(dst_ptr + 4), out1);
#else
- _mm_storeu_si128((__m128i *)(dst_ptr), output);
+ _mm_storeu_si128((__m128i *)(dst_ptr), *poutput);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
-static INLINE __m128i mult_round_shift(const __m128i in0, const __m128i in1,
- const __m128i multiplier,
- const __m128i rounding,
+static INLINE __m128i mult_round_shift(const __m128i *pin0,
+ const __m128i *pin1,
+ const __m128i *pmultiplier,
+ const __m128i *prounding,
const int shift) {
- const __m128i u0 = _mm_madd_epi16(in0, multiplier);
- const __m128i u1 = _mm_madd_epi16(in1, multiplier);
- const __m128i v0 = _mm_add_epi32(u0, rounding);
- const __m128i v1 = _mm_add_epi32(u1, rounding);
+ const __m128i u0 = _mm_madd_epi16(*pin0, *pmultiplier);
+ const __m128i u1 = _mm_madd_epi16(*pin1, *pmultiplier);
+ const __m128i v0 = _mm_add_epi32(u0, *prounding);
+ const __m128i v1 = _mm_add_epi32(u1, *prounding);
const __m128i w0 = _mm_srai_epi32(v0, shift);
const __m128i w1 = _mm_srai_epi32(v1, shift);
return _mm_packs_epi32(w0, w1);
}
static INLINE void transpose_and_output8x8(
- const __m128i in00, const __m128i in01,
- const __m128i in02, const __m128i in03,
- const __m128i in04, const __m128i in05,
- const __m128i in06, const __m128i in07,
+ const __m128i *pin00, const __m128i *pin01,
+ const __m128i *pin02, const __m128i *pin03,
+ const __m128i *pin04, const __m128i *pin05,
+ const __m128i *pin06, const __m128i *pin07,
const int pass, int16_t* out0_ptr,
tran_low_t* out1_ptr) {
// 00 01 02 03 04 05 06 07
@@ -297,14 +388,14 @@
// 50 51 52 53 54 55 56 57
// 60 61 62 63 64 65 66 67
// 70 71 72 73 74 75 76 77
- const __m128i tr0_0 = _mm_unpacklo_epi16(in00, in01);
- const __m128i tr0_1 = _mm_unpacklo_epi16(in02, in03);
- const __m128i tr0_2 = _mm_unpackhi_epi16(in00, in01);
- const __m128i tr0_3 = _mm_unpackhi_epi16(in02, in03);
- const __m128i tr0_4 = _mm_unpacklo_epi16(in04, in05);
- const __m128i tr0_5 = _mm_unpacklo_epi16(in06, in07);
- const __m128i tr0_6 = _mm_unpackhi_epi16(in04, in05);
- const __m128i tr0_7 = _mm_unpackhi_epi16(in06, in07);
+ const __m128i tr0_0 = _mm_unpacklo_epi16(*pin00, *pin01);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(*pin02, *pin03);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(*pin00, *pin01);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(*pin02, *pin03);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(*pin04, *pin05);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(*pin06, *pin07);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(*pin04, *pin05);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(*pin06, *pin07);
// 00 10 01 11 02 12 03 13
// 20 30 21 31 22 32 23 33
// 04 14 05 15 06 16 07 17
@@ -355,14 +446,14 @@
_mm_storeu_si128((__m128i*)(out0_ptr + 6 * 16), tr2_6);
_mm_storeu_si128((__m128i*)(out0_ptr + 7 * 16), tr2_7);
} else {
- storeu_output(tr2_0, (out1_ptr + 0 * 16));
- storeu_output(tr2_1, (out1_ptr + 1 * 16));
- storeu_output(tr2_2, (out1_ptr + 2 * 16));
- storeu_output(tr2_3, (out1_ptr + 3 * 16));
- storeu_output(tr2_4, (out1_ptr + 4 * 16));
- storeu_output(tr2_5, (out1_ptr + 5 * 16));
- storeu_output(tr2_6, (out1_ptr + 6 * 16));
- storeu_output(tr2_7, (out1_ptr + 7 * 16));
+ storeu_output(&tr2_0, (out1_ptr + 0 * 16));
+ storeu_output(&tr2_1, (out1_ptr + 1 * 16));
+ storeu_output(&tr2_2, (out1_ptr + 2 * 16));
+ storeu_output(&tr2_3, (out1_ptr + 3 * 16));
+ storeu_output(&tr2_4, (out1_ptr + 4 * 16));
+ storeu_output(&tr2_5, (out1_ptr + 5 * 16));
+ storeu_output(&tr2_6, (out1_ptr + 6 * 16));
+ storeu_output(&tr2_7, (out1_ptr + 7 * 16));
}
}
diff --git a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
index 508e1d4..f5f05e7 100644
--- a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
+++ b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
@@ -122,8 +122,8 @@
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
%ifidn %1, b_32x32
- pmovmskb r6, m7
- pmovmskb r2, m12
+ pmovmskb r6d, m7
+ pmovmskb r2d, m12
or r6, r2
jz .skip_iter
%endif
@@ -308,8 +308,8 @@
%ifidn %1, fp_32x32
pcmpgtw m7, m6, m0
pcmpgtw m12, m11, m0
- pmovmskb r6, m7
- pmovmskb r2, m12
+ pmovmskb r6d, m7
+ pmovmskb r2d, m12
or r6, r2
jz .skip_iter
diff --git a/vp9/encoder/x86/vp9_subpel_variance.asm b/vp9/encoder/x86/vp9_subpel_variance.asm
index 1a9e4e8..06b8b03 100644
--- a/vp9/encoder/x86/vp9_subpel_variance.asm
+++ b/vp9/encoder/x86/vp9_subpel_variance.asm
@@ -101,7 +101,7 @@
pshufd m4, m6, 0x1
movd [r1], m7 ; store sse
paddd m6, m4
- movd rax, m6 ; store sum as return value
+ movd raxd, m6 ; store sum as return value
%else ; mmsize == 8
pshufw m4, m6, 0xe
pshufw m3, m7, 0xe
@@ -113,7 +113,7 @@
movd [r1], m7 ; store sse
pshufw m4, m6, 0xe
paddd m6, m4
- movd rax, m6 ; store sum as return value
+ movd raxd, m6 ; store sum as return value
%endif
RET
%endmacro
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index e86df22..b9fb814 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -355,6 +355,7 @@
const struct vp9_extracfg *extra_cfg) {
const int is_vbr = cfg->rc_end_usage == VPX_VBR;
oxcf->profile = cfg->g_profile;
+ oxcf->max_threads = (int)cfg->g_threads;
oxcf->width = cfg->g_w;
oxcf->height = cfg->g_h;
oxcf->bit_depth = cfg->g_bit_depth;
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index d3076a3..c75fd8a 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -31,6 +31,8 @@
VP9_CX_SRCS-yes += encoder/vp9_encodeframe.h
VP9_CX_SRCS-yes += encoder/vp9_encodemb.c
VP9_CX_SRCS-yes += encoder/vp9_encodemv.c
+VP9_CX_SRCS-yes += encoder/vp9_ethread.h
+VP9_CX_SRCS-yes += encoder/vp9_ethread.c
VP9_CX_SRCS-yes += encoder/vp9_extend.c
VP9_CX_SRCS-yes += encoder/vp9_firstpass.c
VP9_CX_SRCS-yes += encoder/vp9_block.h
diff --git a/vpx/vp8cx.h b/vpx/vp8cx.h
index 0a45dbb..4fc0fd6 100644
--- a/vpx/vp8cx.h
+++ b/vpx/vp8cx.h
@@ -193,6 +193,7 @@
*
*/
VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ VP8E_SET_FRAME_FLAGS, /**< control function to set reference and update frame flags */
/*!\brief Max data rate for Inter frames
*
@@ -222,6 +223,17 @@
*/
VP8E_SET_GF_CBR_BOOST_PCT,
+ /*!\brief Codec control function to set the temporal layer id
+ *
+ * For temporal scalability: this control allows the application to set the
+ * layer id for each frame to be encoded. Note that this control must be set
+ * for every frame prior to encoding. The usage of this control function
+ * supersedes the internal temporal pattern counter, which is now deprecated.
+ */
+ VP8E_SET_TEMPORAL_LAYER_ID,
+
+ VP8E_SET_SCREEN_CONTENT_MODE, /**<control function to set encoder screen content mode */
+
/* TODO(jkoleszar): Move to vp9cx.h */
VP9E_SET_LOSSLESS,
VP9E_SET_TILE_COLUMNS,
@@ -362,6 +374,8 @@
VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_UPD_REFERENCE, int)
VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_USE_REFERENCE, int)
+VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int)
+VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID, int)
VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *)
@@ -395,6 +409,9 @@
VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTER_BITRATE_PCT, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_GF_CBR_BOOST_PCT, unsigned int)
+
+VPX_CTRL_USE_TYPE(VP8E_SET_SCREEN_CONTENT_MODE, unsigned int)
+
VPX_CTRL_USE_TYPE(VP9E_SET_LOSSLESS, unsigned int)
VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PARALLEL_DECODING, unsigned int)
diff --git a/vpx_scale/vpx_scale.mk b/vpx_scale/vpx_scale.mk
index 92d4991..a49abf3 100644
--- a/vpx_scale/vpx_scale.mk
+++ b/vpx_scale/vpx_scale.mk
@@ -1,7 +1,7 @@
SCALE_SRCS-yes += vpx_scale.mk
SCALE_SRCS-yes += yv12config.h
-SCALE_SRCS-yes += vpx_scale.h
-SCALE_SRCS-yes += generic/vpx_scale.c
+SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += vpx_scale.h
+SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/vpx_scale.c
SCALE_SRCS-yes += generic/yv12config.c
SCALE_SRCS-yes += generic/yv12extend.c
SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/gen_scalers.c
diff --git a/vpxenc.c b/vpxenc.c
index 2b89fc1..cbf142a 100644
--- a/vpxenc.c
+++ b/vpxenc.c
@@ -354,13 +354,16 @@
static const arg_def_t gf_cbr_boost_pct = ARG_DEF(
NULL, "gf-cbr-boost", 1, "Boost for Golden Frame in CBR mode (pct)");
+static const arg_def_t screen_content_mode = ARG_DEF(NULL, "screen-content-mode", 1,
+ "Screen content mode");
+
#if CONFIG_VP8_ENCODER
static const arg_def_t token_parts = ARG_DEF(
NULL, "token-parts", 1, "Number of token partitions to use, log2");
static const arg_def_t *vp8_args[] = {
&cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
&token_parts, &arnr_maxframes, &arnr_strength, &arnr_type,
- &tune_ssim, &cq_level, &max_intra_rate_pct,
+ &tune_ssim, &cq_level, &max_intra_rate_pct, &screen_content_mode,
NULL
};
static const int vp8_arg_ctrl_map[] = {
@@ -369,6 +372,7 @@
VP8E_SET_TOKEN_PARTITIONS,
VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE,
VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ VP8E_SET_SCREEN_CONTENT_MODE,
0
};
#endif