Merge "Minor bug fix in ex-intra experiment" into nextgenv2
diff --git a/test/vp10_fwd_txfm1d_test.cc b/test/vp10_fwd_txfm1d_test.cc
index a39e0ef..bcbc617 100644
--- a/test/vp10_fwd_txfm1d_test.cc
+++ b/test/vp10_fwd_txfm1d_test.cc
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "test/vp10_txfm_test.h"
#include "vp10/common/vp10_fwd_txfm1d.h"
+#include "test/vp10_txfm_test.h"
using libvpx_test::ACMRandom;
@@ -17,12 +17,14 @@
static int txfm_type_num = 2;
static TYPE_TXFM txfm_type_ls[2] = {TYPE_DCT, TYPE_ADST};
-static int txfm_size_num = 4;
-static int txfm_size_ls[4] = {4, 8, 16, 32};
+static int txfm_size_num = 5;
+static int txfm_size_ls[5] = {4, 8, 16, 32, 64};
-static TxfmFunc fwd_txfm_func_ls[2][4] = {
- {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new},
- {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new}};
+static TxfmFunc fwd_txfm_func_ls[2][5] = {
+ {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
+ vp10_fdct64_new},
+ {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new,
+ NULL}};
// the maximum stage number of fwd/inv 1d dct/adst txfm is 12
static int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14};
@@ -104,19 +106,21 @@
int max_error = 7;
const int count_test_block = 5000;
- for (int ti = 0; ti < count_test_block; ++ti) {
- for (int ni = 0; ni < txfm_size; ++ni) {
- input[ni] = rnd.Rand16() % base - rnd.Rand16() % base;
- ref_input[ni] = static_cast<double>(input[ni]);
- }
+ if (fwd_txfm_func != NULL) {
+ for (int ti = 0; ti < count_test_block; ++ti) {
+ for (int ni = 0; ni < txfm_size; ++ni) {
+ input[ni] = rnd.Rand16() % base - rnd.Rand16() % base;
+ ref_input[ni] = static_cast<double>(input[ni]);
+ }
- fwd_txfm_func(input, output, cos_bit, range_bit);
- reference_hybrid_1d(ref_input, ref_output, txfm_size, txfm_type);
+ fwd_txfm_func(input, output, cos_bit, range_bit);
+ reference_hybrid_1d(ref_input, ref_output, txfm_size, txfm_type);
- for (int ni = 0; ni < txfm_size; ++ni) {
- EXPECT_LE(
- abs(output[ni] - static_cast<int32_t>(round(ref_output[ni]))),
- max_error);
+ for (int ni = 0; ni < txfm_size; ++ni) {
+ EXPECT_LE(
+ abs(output[ni] - static_cast<int32_t>(round(ref_output[ni]))),
+ max_error);
+ }
}
}
}
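
Note on the test above: the fadst64 slot is NULL because no 64-point
ADST exists yet, hence the new NULL guard around the loop. The integer
output is checked against a double-precision reference; for the DCT
rows, reference_hybrid_1d is assumed to reduce to a textbook O(N^2)
DCT-II, sketched below (the helper name and normalization are
assumptions, not the actual vp10_txfm_test.h contract):

    #include <math.h>

    // Hypothetical stand-in for the DCT branch of reference_hybrid_1d():
    // unnormalized DCT-II, out[k] = sum_n in[n] * cos(pi*(2n+1)k/(2N)).
    static void reference_dct_1d(const double *in, double *out, int size) {
      for (int k = 0; k < size; ++k) {
        out[k] = 0;
        for (int n = 0; n < size; ++n)
          out[k] += in[n] * cos(M_PI * (2 * n + 1) * k / (2.0 * size));
      }
    }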
diff --git a/test/vp10_fwd_txfm2d_test.cc b/test/vp10_fwd_txfm2d_test.cc
index e6416cc..d4115c9 100644
--- a/test/vp10_fwd_txfm2d_test.cc
+++ b/test/vp10_fwd_txfm2d_test.cc
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <math.h>
#include <stdio.h>
#include <stdlib.h>
-#include <math.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
@@ -23,21 +23,22 @@
namespace {
-const int txfm_size_num = 4;
-const int txfm_size_ls[4] = {4, 8, 16, 32};
-const TXFM_2D_CFG fwd_txfm_cfg_ls[4][4] = {
- {fwd_txfm_2d_cfg_dct_dct_4, fwd_txfm_2d_cfg_dct_adst_4,
- fwd_txfm_2d_cfg_adst_adst_4, fwd_txfm_2d_cfg_adst_dct_4},
- {fwd_txfm_2d_cfg_dct_dct_8, fwd_txfm_2d_cfg_dct_adst_8,
- fwd_txfm_2d_cfg_adst_adst_8, fwd_txfm_2d_cfg_adst_dct_8},
- {fwd_txfm_2d_cfg_dct_dct_16, fwd_txfm_2d_cfg_dct_adst_16,
- fwd_txfm_2d_cfg_adst_adst_16, fwd_txfm_2d_cfg_adst_dct_16},
- {fwd_txfm_2d_cfg_dct_dct_32, fwd_txfm_2d_cfg_dct_adst_32,
- fwd_txfm_2d_cfg_adst_adst_32, fwd_txfm_2d_cfg_adst_dct_32}};
+const int txfm_size_num = 5;
+const int txfm_size_ls[5] = {4, 8, 16, 32, 64};
+const TXFM_2D_CFG* fwd_txfm_cfg_ls[5][4] = {
+ {&fwd_txfm_2d_cfg_dct_dct_4, &fwd_txfm_2d_cfg_dct_adst_4,
+ &fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_dct_4},
+ {&fwd_txfm_2d_cfg_dct_dct_8, &fwd_txfm_2d_cfg_dct_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_8, &fwd_txfm_2d_cfg_adst_dct_8},
+ {&fwd_txfm_2d_cfg_dct_dct_16, &fwd_txfm_2d_cfg_dct_adst_16,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_dct_16},
+ {&fwd_txfm_2d_cfg_dct_dct_32, &fwd_txfm_2d_cfg_dct_adst_32,
+ &fwd_txfm_2d_cfg_adst_adst_32, &fwd_txfm_2d_cfg_adst_dct_32},
+ {&fwd_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}};
-const Fwd_Txfm2d_Func fwd_txfm_func_ls[4] = {
+const Fwd_Txfm2d_Func fwd_txfm_func_ls[5] = {
vp10_fwd_txfm2d_4x4, vp10_fwd_txfm2d_8x8, vp10_fwd_txfm2d_16x16,
- vp10_fwd_txfm2d_32x32};
+ vp10_fwd_txfm2d_32x32, vp10_fwd_txfm2d_64x64};
const int txfm_type_num = 4;
const TYPE_TXFM type_ls_0[4] = {TYPE_DCT, TYPE_DCT, TYPE_ADST, TYPE_ADST};
@@ -54,44 +55,48 @@
for (int txfm_type_idx = 0; txfm_type_idx < txfm_type_num;
++txfm_type_idx) {
- TXFM_2D_CFG fwd_txfm_cfg = fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx];
- Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx];
- TYPE_TXFM type0 = type_ls_0[txfm_type_idx];
- TYPE_TXFM type1 = type_ls_1[txfm_type_idx];
- int amplify_bit =
- fwd_txfm_cfg.shift[0] + fwd_txfm_cfg.shift[1] + fwd_txfm_cfg.shift[2];
- double amplify_factor =
- amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit));
+ const TXFM_2D_CFG* fwd_txfm_cfg =
+ fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx];
+ if (fwd_txfm_cfg != NULL) {
+ Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx];
+ TYPE_TXFM type0 = type_ls_0[txfm_type_idx];
+ TYPE_TXFM type1 = type_ls_1[txfm_type_idx];
+ int amplify_bit = fwd_txfm_cfg->shift[0] + fwd_txfm_cfg->shift[1] +
+ fwd_txfm_cfg->shift[2];
+ double amplify_factor =
+ amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit));
- ACMRandom rnd(ACMRandom::DeterministicSeed());
- int count = 5000;
- double avg_abs_error = 0;
- for (int ci = 0; ci < count; ci++) {
- for (int ni = 0; ni < sqr_txfm_size; ++ni) {
- input[ni] = rnd.Rand16() % base;
- ref_input[ni] = static_cast<double>(input[ni]);
- output[ni] = 0;
- ref_output[ni] = 0;
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int count = 500;
+ double avg_abs_error = 0;
+ for (int ci = 0; ci < count; ci++) {
+ for (int ni = 0; ni < sqr_txfm_size; ++ni) {
+ input[ni] = rnd.Rand16() % base;
+ ref_input[ni] = static_cast<double>(input[ni]);
+ output[ni] = 0;
+ ref_output[ni] = 0;
+ }
+
+ fwd_txfm_func(input, output, txfm_size, fwd_txfm_cfg, bd);
+ reference_hybrid_2d(ref_input, ref_output, txfm_size, type0, type1);
+
+ for (int ni = 0; ni < sqr_txfm_size; ++ni) {
+ ref_output[ni] = round(ref_output[ni] * amplify_factor);
+ EXPECT_LE(fabs(output[ni] - ref_output[ni]) / amplify_factor, 60);
+ }
+ avg_abs_error += compute_avg_abs_error<int32_t, double>(
+ output, ref_output, sqr_txfm_size);
}
- fwd_txfm_func(input, output, txfm_size, &fwd_txfm_cfg, bd);
- reference_hybrid_2d(ref_input, ref_output, txfm_size, type0, type1);
-
- for (int ni = 0; ni < sqr_txfm_size; ++ni) {
- ref_output[ni] = round(ref_output[ni] * amplify_factor);
- EXPECT_LE(fabs(output[ni] - ref_output[ni]) / amplify_factor, 30);
- }
- avg_abs_error += compute_avg_abs_error<int32_t, double>(
- output, ref_output, sqr_txfm_size);
+ avg_abs_error /= amplify_factor;
+ avg_abs_error /= count;
+      // max_abs_avg_error comes from the upper bound observed via:
+      // printf("type0: %d type1: %d txfm_size: %d "
+      //        "accuracy_avg_abs_error: %f\n",
+      //        type0, type1, txfm_size, avg_abs_error);
+      const double max_abs_avg_error = 5;
+ EXPECT_LE(avg_abs_error, max_abs_avg_error);
}
-
- avg_abs_error /= amplify_factor;
- avg_abs_error /= count;
- // max_abs_avg_error comes from upper bound of avg_abs_error
- // printf("type0: %d type1: %d txfm_size: %d accuracy_avg_abs_error:
- // %f\n", type0, type1, txfm_size, avg_abs_error);
- double max_abs_avg_error = 1.5;
- EXPECT_LE(avg_abs_error, max_abs_avg_error);
}
delete[] input;
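
Note on the tolerances above: the per-coefficient bound doubles from 30
to 60 and the iteration count drops from 5000 to 500 because the
64-point path both costs more per block and accumulates more rounding
error. The test folds a config's three stage shifts into one gain; a
minimal sketch of that arithmetic, with the new 64x64 shifts as a
worked example:

    #include <stdint.h>

    // Net gain of the three shift stages, as the test computes it.
    // With fwd_shift_dct_dct_64 = {2, -2, -2}: amplify_bit = -2, so the
    // integer output is 1/4 the scale of the float reference.
    static double txfm_gain(const int8_t *shift) {
      const int amplify_bit = shift[0] + shift[1] + shift[2];
      return amplify_bit >= 0 ? (double)(1 << amplify_bit)
                              : 1.0 / (1 << -amplify_bit);
    }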
diff --git a/test/vp10_inv_txfm1d_test.cc b/test/vp10_inv_txfm1d_test.cc
index 3b716c8..2e9e58d 100644
--- a/test/vp10_inv_txfm1d_test.cc
+++ b/test/vp10_inv_txfm1d_test.cc
@@ -16,16 +16,20 @@
namespace {
static int txfm_type_num = 2;
-static int txfm_size_num = 4;
-static int txfm_size_ls[4] = {4, 8, 16, 32};
+static int txfm_size_num = 5;
+static int txfm_size_ls[5] = {4, 8, 16, 32, 64};
-static TxfmFunc fwd_txfm_func_ls[2][4] = {
- {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new},
- {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new}};
+static TxfmFunc fwd_txfm_func_ls[2][5] = {
+ {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
+ vp10_fdct64_new},
+ {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new,
+ NULL}};
-static TxfmFunc inv_txfm_func_ls[2][4] = {
- {vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new},
- {vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new}};
+static TxfmFunc inv_txfm_func_ls[2][5] = {
+ {vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new,
+ vp10_idct64_new},
+ {vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new,
+ NULL}};
// the maximum stage number of fwd/inv 1d dct/adst txfm is 12
static int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14};
@@ -44,19 +48,22 @@
TxfmFunc inv_txfm_func = inv_txfm_func_ls[ti][si];
int max_error = 2;
- const int count_test_block = 5000;
- for (int ci = 0; ci < count_test_block; ++ci) {
- for (int ni = 0; ni < txfm_size; ++ni) {
- input[ni] = rnd.Rand16() % base - rnd.Rand16() % base;
- }
+ if (fwd_txfm_func != NULL) {
+ const int count_test_block = 5000;
+ for (int ci = 0; ci < count_test_block; ++ci) {
+ for (int ni = 0; ni < txfm_size; ++ni) {
+ input[ni] = rnd.Rand16() % base - rnd.Rand16() % base;
+ }
- fwd_txfm_func(input, output, cos_bit, range_bit);
- inv_txfm_func(output, round_trip_output, cos_bit, range_bit);
+ fwd_txfm_func(input, output, cos_bit, range_bit);
+ inv_txfm_func(output, round_trip_output, cos_bit, range_bit);
- for (int ni = 0; ni < txfm_size; ++ni) {
- EXPECT_LE(abs(input[ni] - round_shift(round_trip_output[ni],
- get_max_bit(txfm_size) - 1)),
- max_error);
+ for (int ni = 0; ni < txfm_size; ++ni) {
+ int node_err =
+ abs(input[ni] - round_shift(round_trip_output[ni],
+ get_max_bit(txfm_size) - 1));
+ EXPECT_LE(node_err, max_error);
+ }
}
}
}
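
Note on the round-trip check above: these unnormalized integer
transforms leave a power-of-two gain after fwd + inv, which the
comparison removes with round_shift(..., get_max_bit(txfm_size) - 1).
A sketch of the rounding shift, as assumed from vp10_txfm.h (which this
change does not touch):

    // Rounding right shift: add half an LSB before shifting, so the
    // recovered samples stay within max_error of the original input.
    static INLINE int32_t round_shift(int32_t value, int bit) {
      return (value + (1 << (bit - 1))) >> bit;
    }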
diff --git a/test/vp10_inv_txfm2d_test.cc b/test/vp10_inv_txfm2d_test.cc
index 603821e..b0e6af5 100644
--- a/test/vp10_inv_txfm2d_test.cc
+++ b/test/vp10_inv_txfm2d_test.cc
@@ -25,34 +25,36 @@
namespace {
-const int txfm_size_num = 4;
-const int txfm_size_ls[4] = {4, 8, 16, 32};
-const TXFM_2D_CFG fwd_txfm_cfg_ls[4][4] = {
- {fwd_txfm_2d_cfg_dct_dct_4, fwd_txfm_2d_cfg_dct_adst_4,
- fwd_txfm_2d_cfg_adst_adst_4, fwd_txfm_2d_cfg_adst_dct_4},
- {fwd_txfm_2d_cfg_dct_dct_8, fwd_txfm_2d_cfg_dct_adst_8,
- fwd_txfm_2d_cfg_adst_adst_8, fwd_txfm_2d_cfg_adst_dct_8},
- {fwd_txfm_2d_cfg_dct_dct_16, fwd_txfm_2d_cfg_dct_adst_16,
- fwd_txfm_2d_cfg_adst_adst_16, fwd_txfm_2d_cfg_adst_dct_16},
- {fwd_txfm_2d_cfg_dct_dct_32, fwd_txfm_2d_cfg_dct_adst_32,
- fwd_txfm_2d_cfg_adst_adst_32, fwd_txfm_2d_cfg_adst_dct_32}};
+const int txfm_size_num = 5;
+const int txfm_size_ls[5] = {4, 8, 16, 32, 64};
+const TXFM_2D_CFG* fwd_txfm_cfg_ls[5][4] = {
+ {&fwd_txfm_2d_cfg_dct_dct_4, &fwd_txfm_2d_cfg_dct_adst_4,
+ &fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_dct_4},
+ {&fwd_txfm_2d_cfg_dct_dct_8, &fwd_txfm_2d_cfg_dct_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_8, &fwd_txfm_2d_cfg_adst_dct_8},
+ {&fwd_txfm_2d_cfg_dct_dct_16, &fwd_txfm_2d_cfg_dct_adst_16,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_dct_16},
+ {&fwd_txfm_2d_cfg_dct_dct_32, &fwd_txfm_2d_cfg_dct_adst_32,
+ &fwd_txfm_2d_cfg_adst_adst_32, &fwd_txfm_2d_cfg_adst_dct_32},
+ {&fwd_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}};
-const TXFM_2D_CFG inv_txfm_cfg_ls[4][4] = {
- {inv_txfm_2d_cfg_dct_dct_4, inv_txfm_2d_cfg_dct_adst_4,
- inv_txfm_2d_cfg_adst_adst_4, inv_txfm_2d_cfg_adst_dct_4},
- {inv_txfm_2d_cfg_dct_dct_8, inv_txfm_2d_cfg_dct_adst_8,
- inv_txfm_2d_cfg_adst_adst_8, inv_txfm_2d_cfg_adst_dct_8},
- {inv_txfm_2d_cfg_dct_dct_16, inv_txfm_2d_cfg_dct_adst_16,
- inv_txfm_2d_cfg_adst_adst_16, inv_txfm_2d_cfg_adst_dct_16},
- {inv_txfm_2d_cfg_dct_dct_32, inv_txfm_2d_cfg_dct_adst_32,
- inv_txfm_2d_cfg_adst_adst_32, inv_txfm_2d_cfg_adst_dct_32}};
+const TXFM_2D_CFG* inv_txfm_cfg_ls[5][4] = {
+ {&inv_txfm_2d_cfg_dct_dct_4, &inv_txfm_2d_cfg_dct_adst_4,
+ &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_dct_4},
+ {&inv_txfm_2d_cfg_dct_dct_8, &inv_txfm_2d_cfg_dct_adst_8,
+ &inv_txfm_2d_cfg_adst_adst_8, &inv_txfm_2d_cfg_adst_dct_8},
+ {&inv_txfm_2d_cfg_dct_dct_16, &inv_txfm_2d_cfg_dct_adst_16,
+ &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_dct_16},
+ {&inv_txfm_2d_cfg_dct_dct_32, &inv_txfm_2d_cfg_dct_adst_32,
+ &inv_txfm_2d_cfg_adst_adst_32, &inv_txfm_2d_cfg_adst_dct_32},
+ {&inv_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}};
-const Fwd_Txfm2d_Func fwd_txfm_func_ls[4] = {
+const Fwd_Txfm2d_Func fwd_txfm_func_ls[5] = {
vp10_fwd_txfm2d_4x4, vp10_fwd_txfm2d_8x8, vp10_fwd_txfm2d_16x16,
- vp10_fwd_txfm2d_32x32};
-const Inv_Txfm2d_Func inv_txfm_func_ls[4] = {
+ vp10_fwd_txfm2d_32x32, vp10_fwd_txfm2d_64x64};
+const Inv_Txfm2d_Func inv_txfm_func_ls[5] = {
vp10_inv_txfm2d_add_4x4, vp10_inv_txfm2d_add_8x8, vp10_inv_txfm2d_add_16x16,
- vp10_inv_txfm2d_add_32x32};
+ vp10_inv_txfm2d_add_32x32, vp10_inv_txfm2d_add_64x64};
const int txfm_type_num = 4;
@@ -66,44 +68,46 @@
for (int txfm_type_idx = 0; txfm_type_idx < txfm_type_num;
++txfm_type_idx) {
- const TXFM_2D_CFG fwd_txfm_cfg =
+ const TXFM_2D_CFG* fwd_txfm_cfg =
fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx];
- const TXFM_2D_CFG inv_txfm_cfg =
+ const TXFM_2D_CFG* inv_txfm_cfg =
inv_txfm_cfg_ls[txfm_size_idx][txfm_type_idx];
- const Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx];
- const Inv_Txfm2d_Func inv_txfm_func = inv_txfm_func_ls[txfm_size_idx];
- const int count = 5000;
- double avg_abs_error = 0;
- ACMRandom rnd(ACMRandom::DeterministicSeed());
- for (int ci = 0; ci < count; ci++) {
- for (int ni = 0; ni < sqr_txfm_size; ++ni) {
- if (ci == 0) {
- int extreme_input = base - 1;
- input[ni] = extreme_input; // extreme case
- ref_input[ni] = 0;
- } else {
- input[ni] = rnd.Rand16() % base;
- ref_input[ni] = 0;
+ if (fwd_txfm_cfg != NULL) {
+ const Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx];
+ const Inv_Txfm2d_Func inv_txfm_func = inv_txfm_func_ls[txfm_size_idx];
+ const int count = 1000;
+ double avg_abs_error = 0;
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ for (int ci = 0; ci < count; ci++) {
+ for (int ni = 0; ni < sqr_txfm_size; ++ni) {
+ if (ci == 0) {
+ int extreme_input = base - 1;
+ input[ni] = extreme_input; // extreme case
+ ref_input[ni] = 0;
+ } else {
+ input[ni] = rnd.Rand16() % base;
+ ref_input[ni] = 0;
+ }
}
+
+ fwd_txfm_func(input, output, txfm_size, fwd_txfm_cfg, bd);
+ inv_txfm_func(output, ref_input, txfm_size, inv_txfm_cfg, bd);
+
+ for (int ni = 0; ni < sqr_txfm_size; ++ni) {
+ EXPECT_LE(abs(input[ni] - ref_input[ni]), 2);
+ }
+ avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>(
+ input, ref_input, sqr_txfm_size);
}
- fwd_txfm_func(input, output, txfm_size, &fwd_txfm_cfg, bd);
- inv_txfm_func(output, ref_input, txfm_size, &inv_txfm_cfg, bd);
-
- for (int ni = 0; ni < sqr_txfm_size; ++ni) {
- EXPECT_LE(abs(input[ni] - ref_input[ni]), 2);
- }
- avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>(
- input, ref_input, sqr_txfm_size);
+ avg_abs_error /= count;
+        // max_abs_avg_error comes from the upper bound observed via:
+ // printf("txfm_size: %d accuracy_avg_abs_error: %f\n",
+ // txfm_size, avg_abs_error);
+ // TODO(angiebird): this upper bound is from adst_adst_8
+ const double max_abs_avg_error = 0.024;
+ EXPECT_LE(avg_abs_error, max_abs_avg_error);
}
-
- avg_abs_error /= count;
- // max_abs_avg_error comes from upper bound of
- // printf("txfm_size: %d accuracy_avg_abs_error: %f\n", txfm_size,
- // avg_abs_error);
- // TODO(angiebird): this upper bound is from adst_adst_8
- const double max_abs_avg_error = 0.024;
- EXPECT_LE(avg_abs_error, max_abs_avg_error);
}
delete[] input;
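
Note: both 2-D tests reduce each block to a mean absolute error before
checking the per-size bound. A plausible C rendering of the templated
helper's <int16_t, uint16_t> instantiation (the real template lives in
vp10_txfm_test.h; this shape is an assumption):

    #include <math.h>
    #include <stdint.h>

    // Mean absolute error between the source block and its
    // reconstruction, matching what the test accumulates per block.
    static double avg_abs_error(const int16_t *a, const uint16_t *b,
                                int size) {
      double err = 0;
      for (int i = 0; i < size; ++i)
        err += fabs((double)a[i] - (double)b[i]);
      return err / size;
    }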
diff --git a/vp10/common/alloccommon.c b/vp10/common/alloccommon.c
index e14aee7..b3c216e 100644
--- a/vp10/common/alloccommon.c
+++ b/vp10/common/alloccommon.c
@@ -97,10 +97,13 @@
}
void vp10_free_context_buffers(VP10_COMMON *cm) {
+ int i;
cm->free_mi(cm);
free_seg_map(cm);
- vpx_free(cm->above_context);
- cm->above_context = NULL;
+  for (i = 0; i < MAX_MB_PLANE; i++) {
+ vpx_free(cm->above_context[i]);
+ cm->above_context[i] = NULL;
+ }
vpx_free(cm->above_seg_context);
cm->above_seg_context = NULL;
#if CONFIG_VAR_TX
@@ -128,11 +131,14 @@
}
if (cm->above_context_alloc_cols < cm->mi_cols) {
- vpx_free(cm->above_context);
- cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc(
- 2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE,
- sizeof(*cm->above_context));
- if (!cm->above_context) goto fail;
+ int i;
+    for (i = 0; i < MAX_MB_PLANE; i++) {
+ vpx_free(cm->above_context[i]);
+ cm->above_context[i] = (ENTROPY_CONTEXT *)vpx_calloc(
+ 2 * mi_cols_aligned_to_sb(cm->mi_cols),
+ sizeof(*cm->above_context[0]));
+ if (!cm->above_context[i]) goto fail;
+ }
vpx_free(cm->above_seg_context);
cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc(
diff --git a/vp10/common/onyxc_int.h b/vp10/common/onyxc_int.h
index bcc69f3..26ae569 100644
--- a/vp10/common/onyxc_int.h
+++ b/vp10/common/onyxc_int.h
@@ -313,7 +313,7 @@
BufferPool *buffer_pool;
PARTITION_CONTEXT *above_seg_context;
- ENTROPY_CONTEXT *above_context;
+ ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
#if CONFIG_VAR_TX
TXFM_CONTEXT *above_txfm_context;
TXFM_CONTEXT left_txfm_context[8];
@@ -405,9 +405,7 @@
for (i = 0; i < MAX_MB_PLANE; ++i) {
xd->plane[i].dqcoeff = dqcoeff;
- xd->above_context[i] = cm->above_context +
- i * sizeof(*cm->above_context) * 2 * mi_cols_aligned_to_sb(cm->mi_cols);
-
+ xd->above_context[i] = cm->above_context[i];
if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
memcpy(xd->plane[i].seg_dequant, cm->y_dequant, sizeof(cm->y_dequant));
} else {
@@ -525,6 +523,27 @@
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}
+static INLINE void vp10_zero_above_context(VP10_COMMON *const cm,
+ int mi_col_start, int mi_col_end) {
+ const int width = mi_col_end - mi_col_start;
+ int i;
+
+  for (i = 0; i < MAX_MB_PLANE; i++)
+ vp10_zero_array(cm->above_context[i] + 2 * mi_col_start, 2 * width);
+ vp10_zero_array(cm->above_seg_context + mi_col_start, width);
+#if CONFIG_VAR_TX
+ vp10_zero_array(cm->above_txfm_context + mi_col_start, width);
+#endif // CONFIG_VAR_TX
+}
+
+static INLINE void vp10_zero_left_context(MACROBLOCKD *const xd) {
+ vp10_zero(xd->left_context);
+ vp10_zero(xd->left_seg_context);
+#if CONFIG_VAR_TX
+ vp10_zero(xd->left_txfm_context_buffer);
+#endif
+}
+
#if CONFIG_VAR_TX
static INLINE void set_txfm_ctx(TXFM_CONTEXT *txfm_ctx,
TX_SIZE tx_size,
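
Note: splitting above_context into per-plane pointers removes the old
flat-array offset arithmetic (see the xd->above_context[i] hookup
above) and lets callers reset a column span per plane. A hedged usage
sketch; the function and call site are illustrative, not part of this
change:

    // Hypothetical reset at the start of a tile row: clear the above
    // contexts for the tile's column span, then the left contexts for
    // the new superblock row.
    static void reset_tile_contexts(VP10_COMMON *const cm,
                                    MACROBLOCKD *const xd,
                                    int mi_col_start, int mi_col_end) {
      vp10_zero_above_context(cm, mi_col_start, mi_col_end);
      vp10_zero_left_context(xd);
    }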
diff --git a/vp10/common/vp10_fwd_txfm1d.c b/vp10/common/vp10_fwd_txfm1d.c
index f3da5c9..ef24362 100644
--- a/vp10/common/vp10_fwd_txfm1d.c
+++ b/vp10/common/vp10_fwd_txfm1d.c
@@ -15,8 +15,8 @@
{ \
int i, j; \
for (i = 0; i < size; ++i) { \
- int buf_bit = get_max_bit(abs(buf[i])) + 1; \
- if (buf_bit > bit) { \
+ int buf_bit = get_max_bit(abs(buf[i])) + 1; \
+ if (buf_bit > bit) { \
printf("======== %s overflow ========\n", __func__); \
printf("stage: %d node: %d\n", stage, i); \
printf("bit: %d buf_bit: %d buf[i]: %d\n", bit, buf_bit, buf[i]); \
@@ -32,11 +32,11 @@
#else
#define range_check(stage, input, buf, size, bit) \
{ \
- (void) stage; \
- (void) input; \
- (void) buf; \
- (void) size; \
- (void) bit; \
+ (void)stage; \
+ (void)input; \
+ (void)buf; \
+ (void)size; \
+ (void)bit; \
}
#endif
@@ -1092,7 +1092,6 @@
bf1[14] = bf0[9];
bf1[15] = -bf0[1];
range_check(stage, input, bf1, size, stage_range[stage]);
-
}
void vp10_fadst32_new(const int32_t *input, int32_t *output,
@@ -1529,3 +1528,796 @@
bf1[31] = -bf0[1];
range_check(stage, input, bf1, size, stage_range[stage]);
}
+
+void vp10_fdct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
+ const int32_t size = 64;
+ const int32_t *cospi;
+
+ int32_t stage = 0;
+ int32_t *bf0, *bf1;
+ int32_t step[64];
+
+ // stage 0;
+ range_check(stage, input, input, size, stage_range[stage]);
+
+ // stage 1;
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf1 = output;
+ bf1[0] = input[0] + input[63];
+ bf1[1] = input[1] + input[62];
+ bf1[2] = input[2] + input[61];
+ bf1[3] = input[3] + input[60];
+ bf1[4] = input[4] + input[59];
+ bf1[5] = input[5] + input[58];
+ bf1[6] = input[6] + input[57];
+ bf1[7] = input[7] + input[56];
+ bf1[8] = input[8] + input[55];
+ bf1[9] = input[9] + input[54];
+ bf1[10] = input[10] + input[53];
+ bf1[11] = input[11] + input[52];
+ bf1[12] = input[12] + input[51];
+ bf1[13] = input[13] + input[50];
+ bf1[14] = input[14] + input[49];
+ bf1[15] = input[15] + input[48];
+ bf1[16] = input[16] + input[47];
+ bf1[17] = input[17] + input[46];
+ bf1[18] = input[18] + input[45];
+ bf1[19] = input[19] + input[44];
+ bf1[20] = input[20] + input[43];
+ bf1[21] = input[21] + input[42];
+ bf1[22] = input[22] + input[41];
+ bf1[23] = input[23] + input[40];
+ bf1[24] = input[24] + input[39];
+ bf1[25] = input[25] + input[38];
+ bf1[26] = input[26] + input[37];
+ bf1[27] = input[27] + input[36];
+ bf1[28] = input[28] + input[35];
+ bf1[29] = input[29] + input[34];
+ bf1[30] = input[30] + input[33];
+ bf1[31] = input[31] + input[32];
+ bf1[32] = -input[32] + input[31];
+ bf1[33] = -input[33] + input[30];
+ bf1[34] = -input[34] + input[29];
+ bf1[35] = -input[35] + input[28];
+ bf1[36] = -input[36] + input[27];
+ bf1[37] = -input[37] + input[26];
+ bf1[38] = -input[38] + input[25];
+ bf1[39] = -input[39] + input[24];
+ bf1[40] = -input[40] + input[23];
+ bf1[41] = -input[41] + input[22];
+ bf1[42] = -input[42] + input[21];
+ bf1[43] = -input[43] + input[20];
+ bf1[44] = -input[44] + input[19];
+ bf1[45] = -input[45] + input[18];
+ bf1[46] = -input[46] + input[17];
+ bf1[47] = -input[47] + input[16];
+ bf1[48] = -input[48] + input[15];
+ bf1[49] = -input[49] + input[14];
+ bf1[50] = -input[50] + input[13];
+ bf1[51] = -input[51] + input[12];
+ bf1[52] = -input[52] + input[11];
+ bf1[53] = -input[53] + input[10];
+ bf1[54] = -input[54] + input[9];
+ bf1[55] = -input[55] + input[8];
+ bf1[56] = -input[56] + input[7];
+ bf1[57] = -input[57] + input[6];
+ bf1[58] = -input[58] + input[5];
+ bf1[59] = -input[59] + input[4];
+ bf1[60] = -input[60] + input[3];
+ bf1[61] = -input[61] + input[2];
+ bf1[62] = -input[62] + input[1];
+ bf1[63] = -input[63] + input[0];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 2
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0] + bf0[31];
+ bf1[1] = bf0[1] + bf0[30];
+ bf1[2] = bf0[2] + bf0[29];
+ bf1[3] = bf0[3] + bf0[28];
+ bf1[4] = bf0[4] + bf0[27];
+ bf1[5] = bf0[5] + bf0[26];
+ bf1[6] = bf0[6] + bf0[25];
+ bf1[7] = bf0[7] + bf0[24];
+ bf1[8] = bf0[8] + bf0[23];
+ bf1[9] = bf0[9] + bf0[22];
+ bf1[10] = bf0[10] + bf0[21];
+ bf1[11] = bf0[11] + bf0[20];
+ bf1[12] = bf0[12] + bf0[19];
+ bf1[13] = bf0[13] + bf0[18];
+ bf1[14] = bf0[14] + bf0[17];
+ bf1[15] = bf0[15] + bf0[16];
+ bf1[16] = -bf0[16] + bf0[15];
+ bf1[17] = -bf0[17] + bf0[14];
+ bf1[18] = -bf0[18] + bf0[13];
+ bf1[19] = -bf0[19] + bf0[12];
+ bf1[20] = -bf0[20] + bf0[11];
+ bf1[21] = -bf0[21] + bf0[10];
+ bf1[22] = -bf0[22] + bf0[9];
+ bf1[23] = -bf0[23] + bf0[8];
+ bf1[24] = -bf0[24] + bf0[7];
+ bf1[25] = -bf0[25] + bf0[6];
+ bf1[26] = -bf0[26] + bf0[5];
+ bf1[27] = -bf0[27] + bf0[4];
+ bf1[28] = -bf0[28] + bf0[3];
+ bf1[29] = -bf0[29] + bf0[2];
+ bf1[30] = -bf0[30] + bf0[1];
+ bf1[31] = -bf0[31] + bf0[0];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = bf0[34];
+ bf1[35] = bf0[35];
+ bf1[36] = bf0[36];
+ bf1[37] = bf0[37];
+ bf1[38] = bf0[38];
+ bf1[39] = bf0[39];
+ bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]);
+ bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]);
+ bf1[48] = half_btf(cospi[32], bf0[48], cospi[32], bf0[47], cos_bit[stage]);
+ bf1[49] = half_btf(cospi[32], bf0[49], cospi[32], bf0[46], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[32], bf0[50], cospi[32], bf0[45], cos_bit[stage]);
+ bf1[51] = half_btf(cospi[32], bf0[51], cospi[32], bf0[44], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[32], bf0[52], cospi[32], bf0[43], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[32], bf0[53], cospi[32], bf0[42], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[32], bf0[54], cospi[32], bf0[41], cos_bit[stage]);
+ bf1[55] = half_btf(cospi[32], bf0[55], cospi[32], bf0[40], cos_bit[stage]);
+ bf1[56] = bf0[56];
+ bf1[57] = bf0[57];
+ bf1[58] = bf0[58];
+ bf1[59] = bf0[59];
+ bf1[60] = bf0[60];
+ bf1[61] = bf0[61];
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 3
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[15];
+ bf1[1] = bf0[1] + bf0[14];
+ bf1[2] = bf0[2] + bf0[13];
+ bf1[3] = bf0[3] + bf0[12];
+ bf1[4] = bf0[4] + bf0[11];
+ bf1[5] = bf0[5] + bf0[10];
+ bf1[6] = bf0[6] + bf0[9];
+ bf1[7] = bf0[7] + bf0[8];
+ bf1[8] = -bf0[8] + bf0[7];
+ bf1[9] = -bf0[9] + bf0[6];
+ bf1[10] = -bf0[10] + bf0[5];
+ bf1[11] = -bf0[11] + bf0[4];
+ bf1[12] = -bf0[12] + bf0[3];
+ bf1[13] = -bf0[13] + bf0[2];
+ bf1[14] = -bf0[14] + bf0[1];
+ bf1[15] = -bf0[15] + bf0[0];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = bf0[18];
+ bf1[19] = bf0[19];
+ bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]);
+ bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]);
+ bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit[stage]);
+ bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit[stage]);
+ bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit[stage]);
+ bf1[28] = bf0[28];
+ bf1[29] = bf0[29];
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[47];
+ bf1[33] = bf0[33] + bf0[46];
+ bf1[34] = bf0[34] + bf0[45];
+ bf1[35] = bf0[35] + bf0[44];
+ bf1[36] = bf0[36] + bf0[43];
+ bf1[37] = bf0[37] + bf0[42];
+ bf1[38] = bf0[38] + bf0[41];
+ bf1[39] = bf0[39] + bf0[40];
+ bf1[40] = -bf0[40] + bf0[39];
+ bf1[41] = -bf0[41] + bf0[38];
+ bf1[42] = -bf0[42] + bf0[37];
+ bf1[43] = -bf0[43] + bf0[36];
+ bf1[44] = -bf0[44] + bf0[35];
+ bf1[45] = -bf0[45] + bf0[34];
+ bf1[46] = -bf0[46] + bf0[33];
+ bf1[47] = -bf0[47] + bf0[32];
+ bf1[48] = -bf0[48] + bf0[63];
+ bf1[49] = -bf0[49] + bf0[62];
+ bf1[50] = -bf0[50] + bf0[61];
+ bf1[51] = -bf0[51] + bf0[60];
+ bf1[52] = -bf0[52] + bf0[59];
+ bf1[53] = -bf0[53] + bf0[58];
+ bf1[54] = -bf0[54] + bf0[57];
+ bf1[55] = -bf0[55] + bf0[56];
+ bf1[56] = bf0[56] + bf0[55];
+ bf1[57] = bf0[57] + bf0[54];
+ bf1[58] = bf0[58] + bf0[53];
+ bf1[59] = bf0[59] + bf0[52];
+ bf1[60] = bf0[60] + bf0[51];
+ bf1[61] = bf0[61] + bf0[50];
+ bf1[62] = bf0[62] + bf0[49];
+ bf1[63] = bf0[63] + bf0[48];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 4
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0] + bf0[7];
+ bf1[1] = bf0[1] + bf0[6];
+ bf1[2] = bf0[2] + bf0[5];
+ bf1[3] = bf0[3] + bf0[4];
+ bf1[4] = -bf0[4] + bf0[3];
+ bf1[5] = -bf0[5] + bf0[2];
+ bf1[6] = -bf0[6] + bf0[1];
+ bf1[7] = -bf0[7] + bf0[0];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]);
+ bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit[stage]);
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16] + bf0[23];
+ bf1[17] = bf0[17] + bf0[22];
+ bf1[18] = bf0[18] + bf0[21];
+ bf1[19] = bf0[19] + bf0[20];
+ bf1[20] = -bf0[20] + bf0[19];
+ bf1[21] = -bf0[21] + bf0[18];
+ bf1[22] = -bf0[22] + bf0[17];
+ bf1[23] = -bf0[23] + bf0[16];
+ bf1[24] = -bf0[24] + bf0[31];
+ bf1[25] = -bf0[25] + bf0[30];
+ bf1[26] = -bf0[26] + bf0[29];
+ bf1[27] = -bf0[27] + bf0[28];
+ bf1[28] = bf0[28] + bf0[27];
+ bf1[29] = bf0[29] + bf0[26];
+ bf1[30] = bf0[30] + bf0[25];
+ bf1[31] = bf0[31] + bf0[24];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = bf0[34];
+ bf1[35] = bf0[35];
+ bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit[stage]);
+ bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit[stage]);
+ bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit[stage]);
+ bf1[44] = bf0[44];
+ bf1[45] = bf0[45];
+ bf1[46] = bf0[46];
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = bf0[49];
+ bf1[50] = bf0[50];
+ bf1[51] = bf0[51];
+ bf1[52] = half_btf(cospi[48], bf0[52], -cospi[16], bf0[43], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[48], bf0[53], -cospi[16], bf0[42], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[48], bf0[54], -cospi[16], bf0[41], cos_bit[stage]);
+ bf1[55] = half_btf(cospi[48], bf0[55], -cospi[16], bf0[40], cos_bit[stage]);
+ bf1[56] = half_btf(cospi[16], bf0[56], cospi[48], bf0[39], cos_bit[stage]);
+ bf1[57] = half_btf(cospi[16], bf0[57], cospi[48], bf0[38], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[16], bf0[58], cospi[48], bf0[37], cos_bit[stage]);
+ bf1[59] = half_btf(cospi[16], bf0[59], cospi[48], bf0[36], cos_bit[stage]);
+ bf1[60] = bf0[60];
+ bf1[61] = bf0[61];
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 5
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[3];
+ bf1[1] = bf0[1] + bf0[2];
+ bf1[2] = -bf0[2] + bf0[1];
+ bf1[3] = -bf0[3] + bf0[0];
+ bf1[4] = bf0[4];
+ bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit[stage]);
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8] + bf0[11];
+ bf1[9] = bf0[9] + bf0[10];
+ bf1[10] = -bf0[10] + bf0[9];
+ bf1[11] = -bf0[11] + bf0[8];
+ bf1[12] = -bf0[12] + bf0[15];
+ bf1[13] = -bf0[13] + bf0[14];
+ bf1[14] = bf0[14] + bf0[13];
+ bf1[15] = bf0[15] + bf0[12];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit[stage]);
+ bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit[stage]);
+ bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit[stage]);
+ bf1[22] = bf0[22];
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = bf0[25];
+ bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit[stage]);
+ bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit[stage]);
+ bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit[stage]);
+ bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit[stage]);
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[39];
+ bf1[33] = bf0[33] + bf0[38];
+ bf1[34] = bf0[34] + bf0[37];
+ bf1[35] = bf0[35] + bf0[36];
+ bf1[36] = -bf0[36] + bf0[35];
+ bf1[37] = -bf0[37] + bf0[34];
+ bf1[38] = -bf0[38] + bf0[33];
+ bf1[39] = -bf0[39] + bf0[32];
+ bf1[40] = -bf0[40] + bf0[47];
+ bf1[41] = -bf0[41] + bf0[46];
+ bf1[42] = -bf0[42] + bf0[45];
+ bf1[43] = -bf0[43] + bf0[44];
+ bf1[44] = bf0[44] + bf0[43];
+ bf1[45] = bf0[45] + bf0[42];
+ bf1[46] = bf0[46] + bf0[41];
+ bf1[47] = bf0[47] + bf0[40];
+ bf1[48] = bf0[48] + bf0[55];
+ bf1[49] = bf0[49] + bf0[54];
+ bf1[50] = bf0[50] + bf0[53];
+ bf1[51] = bf0[51] + bf0[52];
+ bf1[52] = -bf0[52] + bf0[51];
+ bf1[53] = -bf0[53] + bf0[50];
+ bf1[54] = -bf0[54] + bf0[49];
+ bf1[55] = -bf0[55] + bf0[48];
+ bf1[56] = -bf0[56] + bf0[63];
+ bf1[57] = -bf0[57] + bf0[62];
+ bf1[58] = -bf0[58] + bf0[61];
+ bf1[59] = -bf0[59] + bf0[60];
+ bf1[60] = bf0[60] + bf0[59];
+ bf1[61] = bf0[61] + bf0[58];
+ bf1[62] = bf0[62] + bf0[57];
+ bf1[63] = bf0[63] + bf0[56];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 6
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit[stage]);
+ bf1[4] = bf0[4] + bf0[5];
+ bf1[5] = -bf0[5] + bf0[4];
+ bf1[6] = -bf0[6] + bf0[7];
+ bf1[7] = bf0[7] + bf0[6];
+ bf1[8] = bf0[8];
+ bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit[stage]);
+ bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit[stage]);
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit[stage]);
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16] + bf0[19];
+ bf1[17] = bf0[17] + bf0[18];
+ bf1[18] = -bf0[18] + bf0[17];
+ bf1[19] = -bf0[19] + bf0[16];
+ bf1[20] = -bf0[20] + bf0[23];
+ bf1[21] = -bf0[21] + bf0[22];
+ bf1[22] = bf0[22] + bf0[21];
+ bf1[23] = bf0[23] + bf0[20];
+ bf1[24] = bf0[24] + bf0[27];
+ bf1[25] = bf0[25] + bf0[26];
+ bf1[26] = -bf0[26] + bf0[25];
+ bf1[27] = -bf0[27] + bf0[24];
+ bf1[28] = -bf0[28] + bf0[31];
+ bf1[29] = -bf0[29] + bf0[30];
+ bf1[30] = bf0[30] + bf0[29];
+ bf1[31] = bf0[31] + bf0[28];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit[stage]);
+ bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit[stage]);
+ bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit[stage]);
+ bf1[38] = bf0[38];
+ bf1[39] = bf0[39];
+ bf1[40] = bf0[40];
+ bf1[41] = bf0[41];
+ bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit[stage]);
+ bf1[46] = bf0[46];
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = bf0[49];
+ bf1[50] = half_btf(cospi[24], bf0[50], -cospi[40], bf0[45], cos_bit[stage]);
+ bf1[51] = half_btf(cospi[24], bf0[51], -cospi[40], bf0[44], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[40], bf0[52], cospi[24], bf0[43], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[40], bf0[53], cospi[24], bf0[42], cos_bit[stage]);
+ bf1[54] = bf0[54];
+ bf1[55] = bf0[55];
+ bf1[56] = bf0[56];
+ bf1[57] = bf0[57];
+ bf1[58] = half_btf(cospi[56], bf0[58], -cospi[8], bf0[37], cos_bit[stage]);
+ bf1[59] = half_btf(cospi[56], bf0[59], -cospi[8], bf0[36], cos_bit[stage]);
+ bf1[60] = half_btf(cospi[8], bf0[60], cospi[56], bf0[35], cos_bit[stage]);
+ bf1[61] = half_btf(cospi[8], bf0[61], cospi[56], bf0[34], cos_bit[stage]);
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 7
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit[stage]);
+ bf1[8] = bf0[8] + bf0[9];
+ bf1[9] = -bf0[9] + bf0[8];
+ bf1[10] = -bf0[10] + bf0[11];
+ bf1[11] = bf0[11] + bf0[10];
+ bf1[12] = bf0[12] + bf0[13];
+ bf1[13] = -bf0[13] + bf0[12];
+ bf1[14] = -bf0[14] + bf0[15];
+ bf1[15] = bf0[15] + bf0[14];
+ bf1[16] = bf0[16];
+ bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit[stage]);
+ bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit[stage]);
+ bf1[19] = bf0[19];
+ bf1[20] = bf0[20];
+ bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit[stage]);
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit[stage]);
+ bf1[27] = bf0[27];
+ bf1[28] = bf0[28];
+ bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit[stage]);
+ bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit[stage]);
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[35];
+ bf1[33] = bf0[33] + bf0[34];
+ bf1[34] = -bf0[34] + bf0[33];
+ bf1[35] = -bf0[35] + bf0[32];
+ bf1[36] = -bf0[36] + bf0[39];
+ bf1[37] = -bf0[37] + bf0[38];
+ bf1[38] = bf0[38] + bf0[37];
+ bf1[39] = bf0[39] + bf0[36];
+ bf1[40] = bf0[40] + bf0[43];
+ bf1[41] = bf0[41] + bf0[42];
+ bf1[42] = -bf0[42] + bf0[41];
+ bf1[43] = -bf0[43] + bf0[40];
+ bf1[44] = -bf0[44] + bf0[47];
+ bf1[45] = -bf0[45] + bf0[46];
+ bf1[46] = bf0[46] + bf0[45];
+ bf1[47] = bf0[47] + bf0[44];
+ bf1[48] = bf0[48] + bf0[51];
+ bf1[49] = bf0[49] + bf0[50];
+ bf1[50] = -bf0[50] + bf0[49];
+ bf1[51] = -bf0[51] + bf0[48];
+ bf1[52] = -bf0[52] + bf0[55];
+ bf1[53] = -bf0[53] + bf0[54];
+ bf1[54] = bf0[54] + bf0[53];
+ bf1[55] = bf0[55] + bf0[52];
+ bf1[56] = bf0[56] + bf0[59];
+ bf1[57] = bf0[57] + bf0[58];
+ bf1[58] = -bf0[58] + bf0[57];
+ bf1[59] = -bf0[59] + bf0[56];
+ bf1[60] = -bf0[60] + bf0[63];
+ bf1[61] = -bf0[61] + bf0[62];
+ bf1[62] = bf0[62] + bf0[61];
+ bf1[63] = bf0[63] + bf0[60];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 8
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit[stage]);
+ bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit[stage]);
+ bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit[stage]);
+ bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit[stage]);
+ bf1[16] = bf0[16] + bf0[17];
+ bf1[17] = -bf0[17] + bf0[16];
+ bf1[18] = -bf0[18] + bf0[19];
+ bf1[19] = bf0[19] + bf0[18];
+ bf1[20] = bf0[20] + bf0[21];
+ bf1[21] = -bf0[21] + bf0[20];
+ bf1[22] = -bf0[22] + bf0[23];
+ bf1[23] = bf0[23] + bf0[22];
+ bf1[24] = bf0[24] + bf0[25];
+ bf1[25] = -bf0[25] + bf0[24];
+ bf1[26] = -bf0[26] + bf0[27];
+ bf1[27] = bf0[27] + bf0[26];
+ bf1[28] = bf0[28] + bf0[29];
+ bf1[29] = -bf0[29] + bf0[28];
+ bf1[30] = -bf0[30] + bf0[31];
+ bf1[31] = bf0[31] + bf0[30];
+ bf1[32] = bf0[32];
+ bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit[stage]);
+ bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit[stage]);
+ bf1[35] = bf0[35];
+ bf1[36] = bf0[36];
+ bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit[stage]);
+ bf1[39] = bf0[39];
+ bf1[40] = bf0[40];
+ bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit[stage]);
+ bf1[43] = bf0[43];
+ bf1[44] = bf0[44];
+ bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit[stage]);
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = half_btf(cospi[12], bf0[49], -cospi[52], bf0[46], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[52], bf0[50], cospi[12], bf0[45], cos_bit[stage]);
+ bf1[51] = bf0[51];
+ bf1[52] = bf0[52];
+ bf1[53] = half_btf(cospi[44], bf0[53], -cospi[20], bf0[42], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[20], bf0[54], cospi[44], bf0[41], cos_bit[stage]);
+ bf1[55] = bf0[55];
+ bf1[56] = bf0[56];
+ bf1[57] = half_btf(cospi[28], bf0[57], -cospi[36], bf0[38], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[36], bf0[58], cospi[28], bf0[37], cos_bit[stage]);
+ bf1[59] = bf0[59];
+ bf1[60] = bf0[60];
+ bf1[61] = half_btf(cospi[60], bf0[61], -cospi[4], bf0[34], cos_bit[stage]);
+ bf1[62] = half_btf(cospi[4], bf0[62], cospi[60], bf0[33], cos_bit[stage]);
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 9
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit[stage]);
+ bf1[17] = half_btf(cospi[30], bf0[17], cospi[34], bf0[30], cos_bit[stage]);
+ bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit[stage]);
+ bf1[19] = half_btf(cospi[14], bf0[19], cospi[50], bf0[28], cos_bit[stage]);
+ bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(cospi[22], bf0[21], cospi[42], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit[stage]);
+ bf1[23] = half_btf(cospi[6], bf0[23], cospi[58], bf0[24], cos_bit[stage]);
+ bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit[stage]);
+ bf1[25] = half_btf(cospi[38], bf0[25], -cospi[26], bf0[22], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit[stage]);
+ bf1[27] = half_btf(cospi[54], bf0[27], -cospi[10], bf0[20], cos_bit[stage]);
+ bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit[stage]);
+ bf1[29] = half_btf(cospi[46], bf0[29], -cospi[18], bf0[18], cos_bit[stage]);
+ bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit[stage]);
+ bf1[31] = half_btf(cospi[62], bf0[31], -cospi[2], bf0[16], cos_bit[stage]);
+ bf1[32] = bf0[32] + bf0[33];
+ bf1[33] = -bf0[33] + bf0[32];
+ bf1[34] = -bf0[34] + bf0[35];
+ bf1[35] = bf0[35] + bf0[34];
+ bf1[36] = bf0[36] + bf0[37];
+ bf1[37] = -bf0[37] + bf0[36];
+ bf1[38] = -bf0[38] + bf0[39];
+ bf1[39] = bf0[39] + bf0[38];
+ bf1[40] = bf0[40] + bf0[41];
+ bf1[41] = -bf0[41] + bf0[40];
+ bf1[42] = -bf0[42] + bf0[43];
+ bf1[43] = bf0[43] + bf0[42];
+ bf1[44] = bf0[44] + bf0[45];
+ bf1[45] = -bf0[45] + bf0[44];
+ bf1[46] = -bf0[46] + bf0[47];
+ bf1[47] = bf0[47] + bf0[46];
+ bf1[48] = bf0[48] + bf0[49];
+ bf1[49] = -bf0[49] + bf0[48];
+ bf1[50] = -bf0[50] + bf0[51];
+ bf1[51] = bf0[51] + bf0[50];
+ bf1[52] = bf0[52] + bf0[53];
+ bf1[53] = -bf0[53] + bf0[52];
+ bf1[54] = -bf0[54] + bf0[55];
+ bf1[55] = bf0[55] + bf0[54];
+ bf1[56] = bf0[56] + bf0[57];
+ bf1[57] = -bf0[57] + bf0[56];
+ bf1[58] = -bf0[58] + bf0[59];
+ bf1[59] = bf0[59] + bf0[58];
+ bf1[60] = bf0[60] + bf0[61];
+ bf1[61] = -bf0[61] + bf0[60];
+ bf1[62] = -bf0[62] + bf0[63];
+ bf1[63] = bf0[63] + bf0[62];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 10
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = bf0[18];
+ bf1[19] = bf0[19];
+ bf1[20] = bf0[20];
+ bf1[21] = bf0[21];
+ bf1[22] = bf0[22];
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = bf0[25];
+ bf1[26] = bf0[26];
+ bf1[27] = bf0[27];
+ bf1[28] = bf0[28];
+ bf1[29] = bf0[29];
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = half_btf(cospi[63], bf0[32], cospi[1], bf0[63], cos_bit[stage]);
+ bf1[33] = half_btf(cospi[31], bf0[33], cospi[33], bf0[62], cos_bit[stage]);
+ bf1[34] = half_btf(cospi[47], bf0[34], cospi[17], bf0[61], cos_bit[stage]);
+ bf1[35] = half_btf(cospi[15], bf0[35], cospi[49], bf0[60], cos_bit[stage]);
+ bf1[36] = half_btf(cospi[55], bf0[36], cospi[9], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(cospi[23], bf0[37], cospi[41], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(cospi[39], bf0[38], cospi[25], bf0[57], cos_bit[stage]);
+ bf1[39] = half_btf(cospi[7], bf0[39], cospi[57], bf0[56], cos_bit[stage]);
+ bf1[40] = half_btf(cospi[59], bf0[40], cospi[5], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(cospi[27], bf0[41], cospi[37], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(cospi[43], bf0[42], cospi[21], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(cospi[11], bf0[43], cospi[53], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(cospi[51], bf0[44], cospi[13], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(cospi[19], bf0[45], cospi[45], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(cospi[35], bf0[46], cospi[29], bf0[49], cos_bit[stage]);
+ bf1[47] = half_btf(cospi[3], bf0[47], cospi[61], bf0[48], cos_bit[stage]);
+ bf1[48] = half_btf(cospi[3], bf0[48], -cospi[61], bf0[47], cos_bit[stage]);
+ bf1[49] = half_btf(cospi[35], bf0[49], -cospi[29], bf0[46], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[19], bf0[50], -cospi[45], bf0[45], cos_bit[stage]);
+ bf1[51] = half_btf(cospi[51], bf0[51], -cospi[13], bf0[44], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[11], bf0[52], -cospi[53], bf0[43], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[43], bf0[53], -cospi[21], bf0[42], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[27], bf0[54], -cospi[37], bf0[41], cos_bit[stage]);
+ bf1[55] = half_btf(cospi[59], bf0[55], -cospi[5], bf0[40], cos_bit[stage]);
+ bf1[56] = half_btf(cospi[7], bf0[56], -cospi[57], bf0[39], cos_bit[stage]);
+ bf1[57] = half_btf(cospi[39], bf0[57], -cospi[25], bf0[38], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[23], bf0[58], -cospi[41], bf0[37], cos_bit[stage]);
+ bf1[59] = half_btf(cospi[55], bf0[59], -cospi[9], bf0[36], cos_bit[stage]);
+ bf1[60] = half_btf(cospi[15], bf0[60], -cospi[49], bf0[35], cos_bit[stage]);
+ bf1[61] = half_btf(cospi[47], bf0[61], -cospi[17], bf0[34], cos_bit[stage]);
+ bf1[62] = half_btf(cospi[31], bf0[62], -cospi[33], bf0[33], cos_bit[stage]);
+ bf1[63] = half_btf(cospi[63], bf0[63], -cospi[1], bf0[32], cos_bit[stage]);
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 11
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[32];
+ bf1[2] = bf0[16];
+ bf1[3] = bf0[48];
+ bf1[4] = bf0[8];
+ bf1[5] = bf0[40];
+ bf1[6] = bf0[24];
+ bf1[7] = bf0[56];
+ bf1[8] = bf0[4];
+ bf1[9] = bf0[36];
+ bf1[10] = bf0[20];
+ bf1[11] = bf0[52];
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[44];
+ bf1[14] = bf0[28];
+ bf1[15] = bf0[60];
+ bf1[16] = bf0[2];
+ bf1[17] = bf0[34];
+ bf1[18] = bf0[18];
+ bf1[19] = bf0[50];
+ bf1[20] = bf0[10];
+ bf1[21] = bf0[42];
+ bf1[22] = bf0[26];
+ bf1[23] = bf0[58];
+ bf1[24] = bf0[6];
+ bf1[25] = bf0[38];
+ bf1[26] = bf0[22];
+ bf1[27] = bf0[54];
+ bf1[28] = bf0[14];
+ bf1[29] = bf0[46];
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[62];
+ bf1[32] = bf0[1];
+ bf1[33] = bf0[33];
+ bf1[34] = bf0[17];
+ bf1[35] = bf0[49];
+ bf1[36] = bf0[9];
+ bf1[37] = bf0[41];
+ bf1[38] = bf0[25];
+ bf1[39] = bf0[57];
+ bf1[40] = bf0[5];
+ bf1[41] = bf0[37];
+ bf1[42] = bf0[21];
+ bf1[43] = bf0[53];
+ bf1[44] = bf0[13];
+ bf1[45] = bf0[45];
+ bf1[46] = bf0[29];
+ bf1[47] = bf0[61];
+ bf1[48] = bf0[3];
+ bf1[49] = bf0[35];
+ bf1[50] = bf0[19];
+ bf1[51] = bf0[51];
+ bf1[52] = bf0[11];
+ bf1[53] = bf0[43];
+ bf1[54] = bf0[27];
+ bf1[55] = bf0[59];
+ bf1[56] = bf0[7];
+ bf1[57] = bf0[39];
+ bf1[58] = bf0[23];
+ bf1[59] = bf0[55];
+ bf1[60] = bf0[15];
+ bf1[61] = bf0[47];
+ bf1[62] = bf0[31];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+}
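
Note: every rotation in vp10_fdct64_new is built from the same
half-butterfly primitive. As assumed from vp10_txfm.h (not part of this
diff), half_btf computes one output of a fixed-point Givens rotation
and scales it back down by cos_bit with rounding; a sketch using a
64-bit intermediate for safety:

    // Assumed shape of half_btf(): w0*in0 + w1*in1, rounded back to
    // cos_bit precision.
    static INLINE int32_t half_btf(int32_t w0, int32_t in0, int32_t w1,
                                   int32_t in1, int bit) {
      const int64_t result = (int64_t)w0 * in0 + (int64_t)w1 * in1;
      return (int32_t)((result + (1LL << (bit - 1))) >> bit);
    }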
diff --git a/vp10/common/vp10_fwd_txfm1d.h b/vp10/common/vp10_fwd_txfm1d.h
index d5b9f40..d06e305 100644
--- a/vp10/common/vp10_fwd_txfm1d.h
+++ b/vp10/common/vp10_fwd_txfm1d.h
@@ -25,6 +25,8 @@
const int8_t *cos_bit, const int8_t *stage_range);
void vp10_fdct32_new(const int32_t *input, int32_t *output,
const int8_t *cos_bit, const int8_t *stage_range);
+void vp10_fdct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
void vp10_fadst4_new(const int32_t *input, int32_t *output,
const int8_t *cos_bit, const int8_t *stage_range);
diff --git a/vp10/common/vp10_fwd_txfm2d.c b/vp10/common/vp10_fwd_txfm2d.c
index 67449ec..045ca2b 100644
--- a/vp10/common/vp10_fwd_txfm2d.c
+++ b/vp10/common/vp10_fwd_txfm2d.c
@@ -82,3 +82,11 @@
(void)bd;
fwd_txfm2d_c(input, output, stride, cfg, txfm_buf);
}
+
+void vp10_fwd_txfm2d_64x64(const int16_t *input, int32_t *output,
+ const int stride, const TXFM_2D_CFG *cfg,
+ const int bd) {
+ int txfm_buf[64 * 64 + 64 + 64];
+ (void)bd;
+ fwd_txfm2d_c(input, output, stride, cfg, txfm_buf);
+}
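
Note on the scratch buffer: fwd_txfm2d_c is assumed to need one
txfm_size x txfm_size intermediate block plus a column vector and a row
vector, hence 64 * 64 + 64 + 64. For the 64x64 case that is over 16 KiB
of int32_t on the stack, which stack-constrained callers may want to
move to the heap:

    // Scratch length assumed by fwd_txfm2d_c(): intermediate block plus
    // one column buffer and one row buffer.
    static int fwd_txfm2d_buf_len(int txfm_size) {
      return txfm_size * txfm_size + 2 * txfm_size;
    }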
diff --git a/vp10/common/vp10_fwd_txfm2d.h b/vp10/common/vp10_fwd_txfm2d.h
index 64e6f56..3829609 100644
--- a/vp10/common/vp10_fwd_txfm2d.h
+++ b/vp10/common/vp10_fwd_txfm2d.h
@@ -27,6 +27,9 @@
void vp10_fwd_txfm2d_32x32(const int16_t *input, int32_t *output,
const int stride, const TXFM_2D_CFG *cfg,
const int bd);
+void vp10_fwd_txfm2d_64x64(const int16_t *input, int32_t *output,
+ const int stride, const TXFM_2D_CFG *cfg,
+ const int bd);
#ifdef __cplusplus
}
#endif
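
A minimal call sketch for the new entry point; the residual block and
bit depth are illustrative, and stride is in units of input samples:

    int16_t residual[64 * 64];  // 64x64 prediction residual, stride 64
    int32_t coeffs[64 * 64];    // transform coefficients out
    vp10_fwd_txfm2d_64x64(residual, coeffs, 64,
                          &fwd_txfm_2d_cfg_dct_dct_64, 10 /* bd, unused */);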
diff --git a/vp10/common/vp10_fwd_txfm2d_cfg.h b/vp10/common/vp10_fwd_txfm2d_cfg.h
index 5c2b4ca..bbabafe 100644
--- a/vp10/common/vp10_fwd_txfm2d_cfg.h
+++ b/vp10/common/vp10_fwd_txfm2d_cfg.h
@@ -95,6 +95,29 @@
vp10_fdct32_new, // .txfm_func_col
vp10_fdct32_new}; // .txfm_func_row;
+// ---------------- config fwd_dct_dct_64 ----------------
+static const int8_t fwd_shift_dct_dct_64[3] = {2, -2, -2};
+static const int8_t fwd_stage_range_col_dct_dct_64[12] = {
+    13, 14, 15, 16, 17, 18, 19, 19, 19, 19, 19, 19};
+static const int8_t fwd_stage_range_row_dct_dct_64[12] = {
+    17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22, 22};
+static const int8_t fwd_cos_bit_col_dct_dct_64[12] = {
+    15, 15, 15, 15, 15, 14, 13, 13, 13, 13, 13, 13};
+static const int8_t fwd_cos_bit_row_dct_dct_64[12] = {
+    15, 14, 13, 12, 11, 10, 10, 10, 10, 10, 10, 10};
+
+static const TXFM_2D_CFG fwd_txfm_2d_cfg_dct_dct_64 = {
+ 64, // .txfm_size
+ 12, // .stage_num_col
+ 12, // .stage_num_row
+ fwd_shift_dct_dct_64, // .shift
+ fwd_stage_range_col_dct_dct_64, // .stage_range_col
+ fwd_stage_range_row_dct_dct_64, // .stage_range_row
+ fwd_cos_bit_col_dct_dct_64, // .cos_bit_col
+ fwd_cos_bit_row_dct_dct_64, // .cos_bit_row
+ vp10_fdct64_new, // .txfm_func_col
+ vp10_fdct64_new}; // .txfm_func_row;
+
// ---------------- config fwd_dct_adst_4 ----------------
static const int8_t fwd_shift_dct_adst_4[3] = {5, -2, -1};
static const int8_t fwd_stage_range_col_dct_adst_4[4] = {16, 17, 18, 18};
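
Note on fwd_stage_range_col_dct_dct_64 above: the ranges encode
worst-case growth. Each of the first log2(64) = 6 stages contains
butterflies that can double a value (+1 bit); the later half_btf
rotations are range-preserving, so the bound plateaus at 19. A
hypothetical generator for the column ranges (the row ranges start from
a different width after the transpose and do not fit this formula):

    // Illustrative only: range[s] = input_bits + min(s, log2_size).
    // input_bits = 13, log2_size = 6, stages = 12 reproduces
    // {13, 14, 15, 16, 17, 18, 19, 19, 19, 19, 19, 19}.
    static void make_stage_range(int8_t *range, int stages,
                                 int log2_size, int input_bits) {
      for (int s = 0; s < stages; ++s)
        range[s] = input_bits + (s < log2_size ? s : log2_size);
    }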
diff --git a/vp10/common/vp10_inv_txfm1d.c b/vp10/common/vp10_inv_txfm1d.c
index 606ca55..494000f 100644
--- a/vp10/common/vp10_inv_txfm1d.c
+++ b/vp10/common/vp10_inv_txfm1d.c
@@ -32,11 +32,11 @@
#else
#define range_check(stage, input, buf, size, bit) \
{ \
- (void) stage; \
- (void) input; \
- (void) buf; \
- (void) size; \
- (void) bit; \
+ (void)stage; \
+ (void)input; \
+ (void)buf; \
+ (void)size; \
+ (void)bit; \
}
#endif
@@ -1535,3 +1535,796 @@
bf1[31] = bf0[0];
range_check(stage, input, bf1, size, stage_range[stage]);
}
+
+void vp10_idct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range) {
+ const int32_t size = 64;
+ const int32_t *cospi;
+
+ int32_t stage = 0;
+ int32_t *bf0, *bf1;
+ int32_t step[64];
+
+ // stage 0;
+ range_check(stage, input, input, size, stage_range[stage]);
+
+ // stage 1;
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf1 = output;
+ bf1[0] = input[0];
+ bf1[1] = input[32];
+ bf1[2] = input[16];
+ bf1[3] = input[48];
+ bf1[4] = input[8];
+ bf1[5] = input[40];
+ bf1[6] = input[24];
+ bf1[7] = input[56];
+ bf1[8] = input[4];
+ bf1[9] = input[36];
+ bf1[10] = input[20];
+ bf1[11] = input[52];
+ bf1[12] = input[12];
+ bf1[13] = input[44];
+ bf1[14] = input[28];
+ bf1[15] = input[60];
+ bf1[16] = input[2];
+ bf1[17] = input[34];
+ bf1[18] = input[18];
+ bf1[19] = input[50];
+ bf1[20] = input[10];
+ bf1[21] = input[42];
+ bf1[22] = input[26];
+ bf1[23] = input[58];
+ bf1[24] = input[6];
+ bf1[25] = input[38];
+ bf1[26] = input[22];
+ bf1[27] = input[54];
+ bf1[28] = input[14];
+ bf1[29] = input[46];
+ bf1[30] = input[30];
+ bf1[31] = input[62];
+ bf1[32] = input[1];
+ bf1[33] = input[33];
+ bf1[34] = input[17];
+ bf1[35] = input[49];
+ bf1[36] = input[9];
+ bf1[37] = input[41];
+ bf1[38] = input[25];
+ bf1[39] = input[57];
+ bf1[40] = input[5];
+ bf1[41] = input[37];
+ bf1[42] = input[21];
+ bf1[43] = input[53];
+ bf1[44] = input[13];
+ bf1[45] = input[45];
+ bf1[46] = input[29];
+ bf1[47] = input[61];
+ bf1[48] = input[3];
+ bf1[49] = input[35];
+ bf1[50] = input[19];
+ bf1[51] = input[51];
+ bf1[52] = input[11];
+ bf1[53] = input[43];
+ bf1[54] = input[27];
+ bf1[55] = input[59];
+ bf1[56] = input[7];
+ bf1[57] = input[39];
+ bf1[58] = input[23];
+ bf1[59] = input[55];
+ bf1[60] = input[15];
+ bf1[61] = input[47];
+ bf1[62] = input[31];
+ bf1[63] = input[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 2
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = bf0[18];
+ bf1[19] = bf0[19];
+ bf1[20] = bf0[20];
+ bf1[21] = bf0[21];
+ bf1[22] = bf0[22];
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = bf0[25];
+ bf1[26] = bf0[26];
+ bf1[27] = bf0[27];
+ bf1[28] = bf0[28];
+ bf1[29] = bf0[29];
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = half_btf(cospi[63], bf0[32], -cospi[1], bf0[63], cos_bit[stage]);
+ bf1[33] = half_btf(cospi[31], bf0[33], -cospi[33], bf0[62], cos_bit[stage]);
+ bf1[34] = half_btf(cospi[47], bf0[34], -cospi[17], bf0[61], cos_bit[stage]);
+ bf1[35] = half_btf(cospi[15], bf0[35], -cospi[49], bf0[60], cos_bit[stage]);
+ bf1[36] = half_btf(cospi[55], bf0[36], -cospi[9], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(cospi[23], bf0[37], -cospi[41], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(cospi[39], bf0[38], -cospi[25], bf0[57], cos_bit[stage]);
+ bf1[39] = half_btf(cospi[7], bf0[39], -cospi[57], bf0[56], cos_bit[stage]);
+ bf1[40] = half_btf(cospi[59], bf0[40], -cospi[5], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(cospi[27], bf0[41], -cospi[37], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(cospi[43], bf0[42], -cospi[21], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(cospi[11], bf0[43], -cospi[53], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(cospi[51], bf0[44], -cospi[13], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(cospi[19], bf0[45], -cospi[45], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(cospi[35], bf0[46], -cospi[29], bf0[49], cos_bit[stage]);
+ bf1[47] = half_btf(cospi[3], bf0[47], -cospi[61], bf0[48], cos_bit[stage]);
+ bf1[48] = half_btf(cospi[61], bf0[47], cospi[3], bf0[48], cos_bit[stage]);
+ bf1[49] = half_btf(cospi[29], bf0[46], cospi[35], bf0[49], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[45], bf0[45], cospi[19], bf0[50], cos_bit[stage]);
+ bf1[51] = half_btf(cospi[13], bf0[44], cospi[51], bf0[51], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[53], bf0[43], cospi[11], bf0[52], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[21], bf0[42], cospi[43], bf0[53], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[37], bf0[41], cospi[27], bf0[54], cos_bit[stage]);
+ bf1[55] = half_btf(cospi[5], bf0[40], cospi[59], bf0[55], cos_bit[stage]);
+ bf1[56] = half_btf(cospi[57], bf0[39], cospi[7], bf0[56], cos_bit[stage]);
+ bf1[57] = half_btf(cospi[25], bf0[38], cospi[39], bf0[57], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[41], bf0[37], cospi[23], bf0[58], cos_bit[stage]);
+ bf1[59] = half_btf(cospi[9], bf0[36], cospi[55], bf0[59], cos_bit[stage]);
+ bf1[60] = half_btf(cospi[49], bf0[35], cospi[15], bf0[60], cos_bit[stage]);
+ bf1[61] = half_btf(cospi[17], bf0[34], cospi[47], bf0[61], cos_bit[stage]);
+ bf1[62] = half_btf(cospi[33], bf0[33], cospi[31], bf0[62], cos_bit[stage]);
+ bf1[63] = half_btf(cospi[1], bf0[32], cospi[63], bf0[63], cos_bit[stage]);
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 3
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = half_btf(cospi[62], bf0[16], -cospi[2], bf0[31], cos_bit[stage]);
+ bf1[17] = half_btf(cospi[30], bf0[17], -cospi[34], bf0[30], cos_bit[stage]);
+ bf1[18] = half_btf(cospi[46], bf0[18], -cospi[18], bf0[29], cos_bit[stage]);
+ bf1[19] = half_btf(cospi[14], bf0[19], -cospi[50], bf0[28], cos_bit[stage]);
+ bf1[20] = half_btf(cospi[54], bf0[20], -cospi[10], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(cospi[22], bf0[21], -cospi[42], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(cospi[38], bf0[22], -cospi[26], bf0[25], cos_bit[stage]);
+ bf1[23] = half_btf(cospi[6], bf0[23], -cospi[58], bf0[24], cos_bit[stage]);
+ bf1[24] = half_btf(cospi[58], bf0[23], cospi[6], bf0[24], cos_bit[stage]);
+ bf1[25] = half_btf(cospi[26], bf0[22], cospi[38], bf0[25], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[42], bf0[21], cospi[22], bf0[26], cos_bit[stage]);
+ bf1[27] = half_btf(cospi[10], bf0[20], cospi[54], bf0[27], cos_bit[stage]);
+ bf1[28] = half_btf(cospi[50], bf0[19], cospi[14], bf0[28], cos_bit[stage]);
+ bf1[29] = half_btf(cospi[18], bf0[18], cospi[46], bf0[29], cos_bit[stage]);
+ bf1[30] = half_btf(cospi[34], bf0[17], cospi[30], bf0[30], cos_bit[stage]);
+ bf1[31] = half_btf(cospi[2], bf0[16], cospi[62], bf0[31], cos_bit[stage]);
+ bf1[32] = bf0[32] + bf0[33];
+ bf1[33] = bf0[32] - bf0[33];
+ bf1[34] = -bf0[34] + bf0[35];
+ bf1[35] = bf0[34] + bf0[35];
+ bf1[36] = bf0[36] + bf0[37];
+ bf1[37] = bf0[36] - bf0[37];
+ bf1[38] = -bf0[38] + bf0[39];
+ bf1[39] = bf0[38] + bf0[39];
+ bf1[40] = bf0[40] + bf0[41];
+ bf1[41] = bf0[40] - bf0[41];
+ bf1[42] = -bf0[42] + bf0[43];
+ bf1[43] = bf0[42] + bf0[43];
+ bf1[44] = bf0[44] + bf0[45];
+ bf1[45] = bf0[44] - bf0[45];
+ bf1[46] = -bf0[46] + bf0[47];
+ bf1[47] = bf0[46] + bf0[47];
+ bf1[48] = bf0[48] + bf0[49];
+ bf1[49] = bf0[48] - bf0[49];
+ bf1[50] = -bf0[50] + bf0[51];
+ bf1[51] = bf0[50] + bf0[51];
+ bf1[52] = bf0[52] + bf0[53];
+ bf1[53] = bf0[52] - bf0[53];
+ bf1[54] = -bf0[54] + bf0[55];
+ bf1[55] = bf0[54] + bf0[55];
+ bf1[56] = bf0[56] + bf0[57];
+ bf1[57] = bf0[56] - bf0[57];
+ bf1[58] = -bf0[58] + bf0[59];
+ bf1[59] = bf0[58] + bf0[59];
+ bf1[60] = bf0[60] + bf0[61];
+ bf1[61] = bf0[60] - bf0[61];
+ bf1[62] = -bf0[62] + bf0[63];
+ bf1[63] = bf0[62] + bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 4
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = bf0[6];
+ bf1[7] = bf0[7];
+ bf1[8] = half_btf(cospi[60], bf0[8], -cospi[4], bf0[15], cos_bit[stage]);
+ bf1[9] = half_btf(cospi[28], bf0[9], -cospi[36], bf0[14], cos_bit[stage]);
+ bf1[10] = half_btf(cospi[44], bf0[10], -cospi[20], bf0[13], cos_bit[stage]);
+ bf1[11] = half_btf(cospi[12], bf0[11], -cospi[52], bf0[12], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[52], bf0[11], cospi[12], bf0[12], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[20], bf0[10], cospi[44], bf0[13], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[36], bf0[9], cospi[28], bf0[14], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[4], bf0[8], cospi[60], bf0[15], cos_bit[stage]);
+ bf1[16] = bf0[16] + bf0[17];
+ bf1[17] = bf0[16] - bf0[17];
+ bf1[18] = -bf0[18] + bf0[19];
+ bf1[19] = bf0[18] + bf0[19];
+ bf1[20] = bf0[20] + bf0[21];
+ bf1[21] = bf0[20] - bf0[21];
+ bf1[22] = -bf0[22] + bf0[23];
+ bf1[23] = bf0[22] + bf0[23];
+ bf1[24] = bf0[24] + bf0[25];
+ bf1[25] = bf0[24] - bf0[25];
+ bf1[26] = -bf0[26] + bf0[27];
+ bf1[27] = bf0[26] + bf0[27];
+ bf1[28] = bf0[28] + bf0[29];
+ bf1[29] = bf0[28] - bf0[29];
+ bf1[30] = -bf0[30] + bf0[31];
+ bf1[31] = bf0[30] + bf0[31];
+ bf1[32] = bf0[32];
+ bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit[stage]);
+ bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit[stage]);
+ bf1[35] = bf0[35];
+ bf1[36] = bf0[36];
+ bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit[stage]);
+ bf1[39] = bf0[39];
+ bf1[40] = bf0[40];
+ bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit[stage]);
+ bf1[43] = bf0[43];
+ bf1[44] = bf0[44];
+ bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit[stage]);
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = half_btf(-cospi[52], bf0[46], cospi[12], bf0[49], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[12], bf0[45], cospi[52], bf0[50], cos_bit[stage]);
+ bf1[51] = bf0[51];
+ bf1[52] = bf0[52];
+ bf1[53] = half_btf(-cospi[20], bf0[42], cospi[44], bf0[53], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[44], bf0[41], cospi[20], bf0[54], cos_bit[stage]);
+ bf1[55] = bf0[55];
+ bf1[56] = bf0[56];
+ bf1[57] = half_btf(-cospi[36], bf0[38], cospi[28], bf0[57], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[28], bf0[37], cospi[36], bf0[58], cos_bit[stage]);
+ bf1[59] = bf0[59];
+ bf1[60] = bf0[60];
+ bf1[61] = half_btf(-cospi[4], bf0[34], cospi[60], bf0[61], cos_bit[stage]);
+ bf1[62] = half_btf(cospi[60], bf0[33], cospi[4], bf0[62], cos_bit[stage]);
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 5
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = half_btf(cospi[56], bf0[4], -cospi[8], bf0[7], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[24], bf0[5], -cospi[40], bf0[6], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[40], bf0[5], cospi[24], bf0[6], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[8], bf0[4], cospi[56], bf0[7], cos_bit[stage]);
+ bf1[8] = bf0[8] + bf0[9];
+ bf1[9] = bf0[8] - bf0[9];
+ bf1[10] = -bf0[10] + bf0[11];
+ bf1[11] = bf0[10] + bf0[11];
+ bf1[12] = bf0[12] + bf0[13];
+ bf1[13] = bf0[12] - bf0[13];
+ bf1[14] = -bf0[14] + bf0[15];
+ bf1[15] = bf0[14] + bf0[15];
+ bf1[16] = bf0[16];
+ bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit[stage]);
+ bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit[stage]);
+ bf1[19] = bf0[19];
+ bf1[20] = bf0[20];
+ bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit[stage]);
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = half_btf(-cospi[40], bf0[22], cospi[24], bf0[25], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[24], bf0[21], cospi[40], bf0[26], cos_bit[stage]);
+ bf1[27] = bf0[27];
+ bf1[28] = bf0[28];
+ bf1[29] = half_btf(-cospi[8], bf0[18], cospi[56], bf0[29], cos_bit[stage]);
+ bf1[30] = half_btf(cospi[56], bf0[17], cospi[8], bf0[30], cos_bit[stage]);
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[35];
+ bf1[33] = bf0[33] + bf0[34];
+ bf1[34] = bf0[33] - bf0[34];
+ bf1[35] = bf0[32] - bf0[35];
+ bf1[36] = -bf0[36] + bf0[39];
+ bf1[37] = -bf0[37] + bf0[38];
+ bf1[38] = bf0[37] + bf0[38];
+ bf1[39] = bf0[36] + bf0[39];
+ bf1[40] = bf0[40] + bf0[43];
+ bf1[41] = bf0[41] + bf0[42];
+ bf1[42] = bf0[41] - bf0[42];
+ bf1[43] = bf0[40] - bf0[43];
+ bf1[44] = -bf0[44] + bf0[47];
+ bf1[45] = -bf0[45] + bf0[46];
+ bf1[46] = bf0[45] + bf0[46];
+ bf1[47] = bf0[44] + bf0[47];
+ bf1[48] = bf0[48] + bf0[51];
+ bf1[49] = bf0[49] + bf0[50];
+ bf1[50] = bf0[49] - bf0[50];
+ bf1[51] = bf0[48] - bf0[51];
+ bf1[52] = -bf0[52] + bf0[55];
+ bf1[53] = -bf0[53] + bf0[54];
+ bf1[54] = bf0[53] + bf0[54];
+ bf1[55] = bf0[52] + bf0[55];
+ bf1[56] = bf0[56] + bf0[59];
+ bf1[57] = bf0[57] + bf0[58];
+ bf1[58] = bf0[57] - bf0[58];
+ bf1[59] = bf0[56] - bf0[59];
+ bf1[60] = -bf0[60] + bf0[63];
+ bf1[61] = -bf0[61] + bf0[62];
+ bf1[62] = bf0[61] + bf0[62];
+ bf1[63] = bf0[60] + bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 6
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(cospi[32], bf0[0], -cospi[32], bf0[1], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[48], bf0[2], -cospi[16], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[16], bf0[2], cospi[48], bf0[3], cos_bit[stage]);
+ bf1[4] = bf0[4] + bf0[5];
+ bf1[5] = bf0[4] - bf0[5];
+ bf1[6] = -bf0[6] + bf0[7];
+ bf1[7] = bf0[6] + bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit[stage]);
+ bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit[stage]);
+ bf1[11] = bf0[11];
+ bf1[12] = bf0[12];
+ bf1[13] = half_btf(-cospi[16], bf0[10], cospi[48], bf0[13], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[48], bf0[9], cospi[16], bf0[14], cos_bit[stage]);
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16] + bf0[19];
+ bf1[17] = bf0[17] + bf0[18];
+ bf1[18] = bf0[17] - bf0[18];
+ bf1[19] = bf0[16] - bf0[19];
+ bf1[20] = -bf0[20] + bf0[23];
+ bf1[21] = -bf0[21] + bf0[22];
+ bf1[22] = bf0[21] + bf0[22];
+ bf1[23] = bf0[20] + bf0[23];
+ bf1[24] = bf0[24] + bf0[27];
+ bf1[25] = bf0[25] + bf0[26];
+ bf1[26] = bf0[25] - bf0[26];
+ bf1[27] = bf0[24] - bf0[27];
+ bf1[28] = -bf0[28] + bf0[31];
+ bf1[29] = -bf0[29] + bf0[30];
+ bf1[30] = bf0[29] + bf0[30];
+ bf1[31] = bf0[28] + bf0[31];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit[stage]);
+ bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit[stage]);
+ bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit[stage]);
+ bf1[38] = bf0[38];
+ bf1[39] = bf0[39];
+ bf1[40] = bf0[40];
+ bf1[41] = bf0[41];
+ bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit[stage]);
+ bf1[46] = bf0[46];
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = bf0[49];
+ bf1[50] = half_btf(-cospi[40], bf0[45], cospi[24], bf0[50], cos_bit[stage]);
+ bf1[51] = half_btf(-cospi[40], bf0[44], cospi[24], bf0[51], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[24], bf0[43], cospi[40], bf0[52], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[24], bf0[42], cospi[40], bf0[53], cos_bit[stage]);
+ bf1[54] = bf0[54];
+ bf1[55] = bf0[55];
+ bf1[56] = bf0[56];
+ bf1[57] = bf0[57];
+ bf1[58] = half_btf(-cospi[8], bf0[37], cospi[56], bf0[58], cos_bit[stage]);
+ bf1[59] = half_btf(-cospi[8], bf0[36], cospi[56], bf0[59], cos_bit[stage]);
+ bf1[60] = half_btf(cospi[56], bf0[35], cospi[8], bf0[60], cos_bit[stage]);
+ bf1[61] = half_btf(cospi[56], bf0[34], cospi[8], bf0[61], cos_bit[stage]);
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 7
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[3];
+ bf1[1] = bf0[1] + bf0[2];
+ bf1[2] = bf0[1] - bf0[2];
+ bf1[3] = bf0[0] - bf0[3];
+ bf1[4] = bf0[4];
+ bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]);
+ bf1[7] = bf0[7];
+ bf1[8] = bf0[8] + bf0[11];
+ bf1[9] = bf0[9] + bf0[10];
+ bf1[10] = bf0[9] - bf0[10];
+ bf1[11] = bf0[8] - bf0[11];
+ bf1[12] = -bf0[12] + bf0[15];
+ bf1[13] = -bf0[13] + bf0[14];
+ bf1[14] = bf0[13] + bf0[14];
+ bf1[15] = bf0[12] + bf0[15];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit[stage]);
+ bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit[stage]);
+ bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit[stage]);
+ bf1[22] = bf0[22];
+ bf1[23] = bf0[23];
+ bf1[24] = bf0[24];
+ bf1[25] = bf0[25];
+ bf1[26] = half_btf(-cospi[16], bf0[21], cospi[48], bf0[26], cos_bit[stage]);
+ bf1[27] = half_btf(-cospi[16], bf0[20], cospi[48], bf0[27], cos_bit[stage]);
+ bf1[28] = half_btf(cospi[48], bf0[19], cospi[16], bf0[28], cos_bit[stage]);
+ bf1[29] = half_btf(cospi[48], bf0[18], cospi[16], bf0[29], cos_bit[stage]);
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[39];
+ bf1[33] = bf0[33] + bf0[38];
+ bf1[34] = bf0[34] + bf0[37];
+ bf1[35] = bf0[35] + bf0[36];
+ bf1[36] = bf0[35] - bf0[36];
+ bf1[37] = bf0[34] - bf0[37];
+ bf1[38] = bf0[33] - bf0[38];
+ bf1[39] = bf0[32] - bf0[39];
+ bf1[40] = -bf0[40] + bf0[47];
+ bf1[41] = -bf0[41] + bf0[46];
+ bf1[42] = -bf0[42] + bf0[45];
+ bf1[43] = -bf0[43] + bf0[44];
+ bf1[44] = bf0[43] + bf0[44];
+ bf1[45] = bf0[42] + bf0[45];
+ bf1[46] = bf0[41] + bf0[46];
+ bf1[47] = bf0[40] + bf0[47];
+ bf1[48] = bf0[48] + bf0[55];
+ bf1[49] = bf0[49] + bf0[54];
+ bf1[50] = bf0[50] + bf0[53];
+ bf1[51] = bf0[51] + bf0[52];
+ bf1[52] = bf0[51] - bf0[52];
+ bf1[53] = bf0[50] - bf0[53];
+ bf1[54] = bf0[49] - bf0[54];
+ bf1[55] = bf0[48] - bf0[55];
+ bf1[56] = -bf0[56] + bf0[63];
+ bf1[57] = -bf0[57] + bf0[62];
+ bf1[58] = -bf0[58] + bf0[61];
+ bf1[59] = -bf0[59] + bf0[60];
+ bf1[60] = bf0[59] + bf0[60];
+ bf1[61] = bf0[58] + bf0[61];
+ bf1[62] = bf0[57] + bf0[62];
+ bf1[63] = bf0[56] + bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 8
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0] + bf0[7];
+ bf1[1] = bf0[1] + bf0[6];
+ bf1[2] = bf0[2] + bf0[5];
+ bf1[3] = bf0[3] + bf0[4];
+ bf1[4] = bf0[3] - bf0[4];
+ bf1[5] = bf0[2] - bf0[5];
+ bf1[6] = bf0[1] - bf0[6];
+ bf1[7] = bf0[0] - bf0[7];
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]);
+ bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]);
+ bf1[14] = bf0[14];
+ bf1[15] = bf0[15];
+ bf1[16] = bf0[16] + bf0[23];
+ bf1[17] = bf0[17] + bf0[22];
+ bf1[18] = bf0[18] + bf0[21];
+ bf1[19] = bf0[19] + bf0[20];
+ bf1[20] = bf0[19] - bf0[20];
+ bf1[21] = bf0[18] - bf0[21];
+ bf1[22] = bf0[17] - bf0[22];
+ bf1[23] = bf0[16] - bf0[23];
+ bf1[24] = -bf0[24] + bf0[31];
+ bf1[25] = -bf0[25] + bf0[30];
+ bf1[26] = -bf0[26] + bf0[29];
+ bf1[27] = -bf0[27] + bf0[28];
+ bf1[28] = bf0[27] + bf0[28];
+ bf1[29] = bf0[26] + bf0[29];
+ bf1[30] = bf0[25] + bf0[30];
+ bf1[31] = bf0[24] + bf0[31];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = bf0[34];
+ bf1[35] = bf0[35];
+ bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit[stage]);
+ bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit[stage]);
+ bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit[stage]);
+ bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit[stage]);
+ bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit[stage]);
+ bf1[44] = bf0[44];
+ bf1[45] = bf0[45];
+ bf1[46] = bf0[46];
+ bf1[47] = bf0[47];
+ bf1[48] = bf0[48];
+ bf1[49] = bf0[49];
+ bf1[50] = bf0[50];
+ bf1[51] = bf0[51];
+ bf1[52] = half_btf(-cospi[16], bf0[43], cospi[48], bf0[52], cos_bit[stage]);
+ bf1[53] = half_btf(-cospi[16], bf0[42], cospi[48], bf0[53], cos_bit[stage]);
+ bf1[54] = half_btf(-cospi[16], bf0[41], cospi[48], bf0[54], cos_bit[stage]);
+ bf1[55] = half_btf(-cospi[16], bf0[40], cospi[48], bf0[55], cos_bit[stage]);
+ bf1[56] = half_btf(cospi[48], bf0[39], cospi[16], bf0[56], cos_bit[stage]);
+ bf1[57] = half_btf(cospi[48], bf0[38], cospi[16], bf0[57], cos_bit[stage]);
+ bf1[58] = half_btf(cospi[48], bf0[37], cospi[16], bf0[58], cos_bit[stage]);
+ bf1[59] = half_btf(cospi[48], bf0[36], cospi[16], bf0[59], cos_bit[stage]);
+ bf1[60] = bf0[60];
+ bf1[61] = bf0[61];
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 9
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[15];
+ bf1[1] = bf0[1] + bf0[14];
+ bf1[2] = bf0[2] + bf0[13];
+ bf1[3] = bf0[3] + bf0[12];
+ bf1[4] = bf0[4] + bf0[11];
+ bf1[5] = bf0[5] + bf0[10];
+ bf1[6] = bf0[6] + bf0[9];
+ bf1[7] = bf0[7] + bf0[8];
+ bf1[8] = bf0[7] - bf0[8];
+ bf1[9] = bf0[6] - bf0[9];
+ bf1[10] = bf0[5] - bf0[10];
+ bf1[11] = bf0[4] - bf0[11];
+ bf1[12] = bf0[3] - bf0[12];
+ bf1[13] = bf0[2] - bf0[13];
+ bf1[14] = bf0[1] - bf0[14];
+ bf1[15] = bf0[0] - bf0[15];
+ bf1[16] = bf0[16];
+ bf1[17] = bf0[17];
+ bf1[18] = bf0[18];
+ bf1[19] = bf0[19];
+ bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]);
+ bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]);
+ bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]);
+ bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]);
+ bf1[24] = half_btf(cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]);
+ bf1[25] = half_btf(cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]);
+ bf1[26] = half_btf(cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]);
+ bf1[27] = half_btf(cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]);
+ bf1[28] = bf0[28];
+ bf1[29] = bf0[29];
+ bf1[30] = bf0[30];
+ bf1[31] = bf0[31];
+ bf1[32] = bf0[32] + bf0[47];
+ bf1[33] = bf0[33] + bf0[46];
+ bf1[34] = bf0[34] + bf0[45];
+ bf1[35] = bf0[35] + bf0[44];
+ bf1[36] = bf0[36] + bf0[43];
+ bf1[37] = bf0[37] + bf0[42];
+ bf1[38] = bf0[38] + bf0[41];
+ bf1[39] = bf0[39] + bf0[40];
+ bf1[40] = bf0[39] - bf0[40];
+ bf1[41] = bf0[38] - bf0[41];
+ bf1[42] = bf0[37] - bf0[42];
+ bf1[43] = bf0[36] - bf0[43];
+ bf1[44] = bf0[35] - bf0[44];
+ bf1[45] = bf0[34] - bf0[45];
+ bf1[46] = bf0[33] - bf0[46];
+ bf1[47] = bf0[32] - bf0[47];
+ bf1[48] = -bf0[48] + bf0[63];
+ bf1[49] = -bf0[49] + bf0[62];
+ bf1[50] = -bf0[50] + bf0[61];
+ bf1[51] = -bf0[51] + bf0[60];
+ bf1[52] = -bf0[52] + bf0[59];
+ bf1[53] = -bf0[53] + bf0[58];
+ bf1[54] = -bf0[54] + bf0[57];
+ bf1[55] = -bf0[55] + bf0[56];
+ bf1[56] = bf0[55] + bf0[56];
+ bf1[57] = bf0[54] + bf0[57];
+ bf1[58] = bf0[53] + bf0[58];
+ bf1[59] = bf0[52] + bf0[59];
+ bf1[60] = bf0[51] + bf0[60];
+ bf1[61] = bf0[50] + bf0[61];
+ bf1[62] = bf0[49] + bf0[62];
+ bf1[63] = bf0[48] + bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 10
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0] + bf0[31];
+ bf1[1] = bf0[1] + bf0[30];
+ bf1[2] = bf0[2] + bf0[29];
+ bf1[3] = bf0[3] + bf0[28];
+ bf1[4] = bf0[4] + bf0[27];
+ bf1[5] = bf0[5] + bf0[26];
+ bf1[6] = bf0[6] + bf0[25];
+ bf1[7] = bf0[7] + bf0[24];
+ bf1[8] = bf0[8] + bf0[23];
+ bf1[9] = bf0[9] + bf0[22];
+ bf1[10] = bf0[10] + bf0[21];
+ bf1[11] = bf0[11] + bf0[20];
+ bf1[12] = bf0[12] + bf0[19];
+ bf1[13] = bf0[13] + bf0[18];
+ bf1[14] = bf0[14] + bf0[17];
+ bf1[15] = bf0[15] + bf0[16];
+ bf1[16] = bf0[15] - bf0[16];
+ bf1[17] = bf0[14] - bf0[17];
+ bf1[18] = bf0[13] - bf0[18];
+ bf1[19] = bf0[12] - bf0[19];
+ bf1[20] = bf0[11] - bf0[20];
+ bf1[21] = bf0[10] - bf0[21];
+ bf1[22] = bf0[9] - bf0[22];
+ bf1[23] = bf0[8] - bf0[23];
+ bf1[24] = bf0[7] - bf0[24];
+ bf1[25] = bf0[6] - bf0[25];
+ bf1[26] = bf0[5] - bf0[26];
+ bf1[27] = bf0[4] - bf0[27];
+ bf1[28] = bf0[3] - bf0[28];
+ bf1[29] = bf0[2] - bf0[29];
+ bf1[30] = bf0[1] - bf0[30];
+ bf1[31] = bf0[0] - bf0[31];
+ bf1[32] = bf0[32];
+ bf1[33] = bf0[33];
+ bf1[34] = bf0[34];
+ bf1[35] = bf0[35];
+ bf1[36] = bf0[36];
+ bf1[37] = bf0[37];
+ bf1[38] = bf0[38];
+ bf1[39] = bf0[39];
+ bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]);
+ bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]);
+ bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]);
+ bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]);
+ bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]);
+ bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]);
+ bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]);
+ bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]);
+ bf1[48] = half_btf(cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]);
+ bf1[49] = half_btf(cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]);
+ bf1[50] = half_btf(cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]);
+ bf1[51] = half_btf(cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]);
+ bf1[52] = half_btf(cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]);
+ bf1[53] = half_btf(cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]);
+ bf1[54] = half_btf(cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]);
+ bf1[55] = half_btf(cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]);
+ bf1[56] = bf0[56];
+ bf1[57] = bf0[57];
+ bf1[58] = bf0[58];
+ bf1[59] = bf0[59];
+ bf1[60] = bf0[60];
+ bf1[61] = bf0[61];
+ bf1[62] = bf0[62];
+ bf1[63] = bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 11
+ stage++;
+ cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[63];
+ bf1[1] = bf0[1] + bf0[62];
+ bf1[2] = bf0[2] + bf0[61];
+ bf1[3] = bf0[3] + bf0[60];
+ bf1[4] = bf0[4] + bf0[59];
+ bf1[5] = bf0[5] + bf0[58];
+ bf1[6] = bf0[6] + bf0[57];
+ bf1[7] = bf0[7] + bf0[56];
+ bf1[8] = bf0[8] + bf0[55];
+ bf1[9] = bf0[9] + bf0[54];
+ bf1[10] = bf0[10] + bf0[53];
+ bf1[11] = bf0[11] + bf0[52];
+ bf1[12] = bf0[12] + bf0[51];
+ bf1[13] = bf0[13] + bf0[50];
+ bf1[14] = bf0[14] + bf0[49];
+ bf1[15] = bf0[15] + bf0[48];
+ bf1[16] = bf0[16] + bf0[47];
+ bf1[17] = bf0[17] + bf0[46];
+ bf1[18] = bf0[18] + bf0[45];
+ bf1[19] = bf0[19] + bf0[44];
+ bf1[20] = bf0[20] + bf0[43];
+ bf1[21] = bf0[21] + bf0[42];
+ bf1[22] = bf0[22] + bf0[41];
+ bf1[23] = bf0[23] + bf0[40];
+ bf1[24] = bf0[24] + bf0[39];
+ bf1[25] = bf0[25] + bf0[38];
+ bf1[26] = bf0[26] + bf0[37];
+ bf1[27] = bf0[27] + bf0[36];
+ bf1[28] = bf0[28] + bf0[35];
+ bf1[29] = bf0[29] + bf0[34];
+ bf1[30] = bf0[30] + bf0[33];
+ bf1[31] = bf0[31] + bf0[32];
+ bf1[32] = bf0[31] - bf0[32];
+ bf1[33] = bf0[30] - bf0[33];
+ bf1[34] = bf0[29] - bf0[34];
+ bf1[35] = bf0[28] - bf0[35];
+ bf1[36] = bf0[27] - bf0[36];
+ bf1[37] = bf0[26] - bf0[37];
+ bf1[38] = bf0[25] - bf0[38];
+ bf1[39] = bf0[24] - bf0[39];
+ bf1[40] = bf0[23] - bf0[40];
+ bf1[41] = bf0[22] - bf0[41];
+ bf1[42] = bf0[21] - bf0[42];
+ bf1[43] = bf0[20] - bf0[43];
+ bf1[44] = bf0[19] - bf0[44];
+ bf1[45] = bf0[18] - bf0[45];
+ bf1[46] = bf0[17] - bf0[46];
+ bf1[47] = bf0[16] - bf0[47];
+ bf1[48] = bf0[15] - bf0[48];
+ bf1[49] = bf0[14] - bf0[49];
+ bf1[50] = bf0[13] - bf0[50];
+ bf1[51] = bf0[12] - bf0[51];
+ bf1[52] = bf0[11] - bf0[52];
+ bf1[53] = bf0[10] - bf0[53];
+ bf1[54] = bf0[9] - bf0[54];
+ bf1[55] = bf0[8] - bf0[55];
+ bf1[56] = bf0[7] - bf0[56];
+ bf1[57] = bf0[6] - bf0[57];
+ bf1[58] = bf0[5] - bf0[58];
+ bf1[59] = bf0[4] - bf0[59];
+ bf1[60] = bf0[3] - bf0[60];
+ bf1[61] = bf0[2] - bf0[61];
+ bf1[62] = bf0[1] - bf0[62];
+ bf1[63] = bf0[0] - bf0[63];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+}
diff --git a/vp10/common/vp10_inv_txfm1d.h b/vp10/common/vp10_inv_txfm1d.h
index 0609b65..fd547a6 100644
--- a/vp10/common/vp10_inv_txfm1d.h
+++ b/vp10/common/vp10_inv_txfm1d.h
@@ -25,6 +25,8 @@
const int8_t *cos_bit, const int8_t *stage_range);
void vp10_idct32_new(const int32_t *input, int32_t *output,
const int8_t *cos_bit, const int8_t *stage_range);
+void vp10_idct64_new(const int32_t *input, int32_t *output,
+ const int8_t *cos_bit, const int8_t *stage_range);
void vp10_iadst4_new(const int32_t *input, int32_t *output,
const int8_t *cos_bit, const int8_t *stage_range);
diff --git a/vp10/common/vp10_inv_txfm2d.c b/vp10/common/vp10_inv_txfm2d.c
index c894a42..cacbd0b 100644
--- a/vp10/common/vp10_inv_txfm2d.c
+++ b/vp10/common/vp10_inv_txfm2d.c
@@ -96,3 +96,15 @@
inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
}
+
+void vp10_inv_txfm2d_add_64x64(const int32_t *input, uint16_t *output,
+ const int stride, const TXFM_2D_CFG *cfg,
+ const int bd) {
+ int txfm_buf[64 * 64 + 64 + 64];
+  // output holds the prediction signal, which is always non-negative and
+  // smaller than (1 << bd) - 1. Since bd < 15, the uint16_t* output buffer
+  // can safely be treated as an int16_t*.
+ inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
+ clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
+}
diff --git a/vp10/common/vp10_inv_txfm2d.h b/vp10/common/vp10_inv_txfm2d.h
index 1b570ef..fd4b23c 100644
--- a/vp10/common/vp10_inv_txfm2d.h
+++ b/vp10/common/vp10_inv_txfm2d.h
@@ -27,6 +27,9 @@
void vp10_inv_txfm2d_add_32x32(const int32_t *input, uint16_t *output,
const int stride, const TXFM_2D_CFG *cfg,
const int bd);
+void vp10_inv_txfm2d_add_64x64(const int32_t *input, uint16_t *output,
+ const int stride, const TXFM_2D_CFG *cfg,
+ const int bd);
#ifdef __cplusplus
}
#endif
diff --git a/vp10/common/vp10_inv_txfm2d_cfg.h b/vp10/common/vp10_inv_txfm2d_cfg.h
index fc552fe..a1c4892 100644
--- a/vp10/common/vp10_inv_txfm2d_cfg.h
+++ b/vp10/common/vp10_inv_txfm2d_cfg.h
@@ -96,6 +96,29 @@
vp10_idct32_new, // .txfm_func_col
vp10_idct32_new}; // .txfm_func_row;
+// ---------------- config inv_dct_dct_64 ----------------
+static const int8_t inv_shift_dct_dct_64[2] = {-1, -7};
+static const int8_t inv_stage_range_col_dct_dct_64[12] = {19, 19, 19, 19, 19, 19,
+                                                          19, 19, 19, 19, 18, 18};
+static const int8_t inv_stage_range_row_dct_dct_64[12] = {20, 20, 20, 20, 20, 20,
+                                                          20, 20, 20, 20, 20, 20};
+static const int8_t inv_cos_bit_col_dct_dct_64[12] = {13, 13, 13, 13, 13, 13,
+                                                      13, 13, 13, 13, 13, 14};
+static const int8_t inv_cos_bit_row_dct_dct_64[12] = {12, 12, 12, 12, 12, 12,
+                                                      12, 12, 12, 12, 12, 12};
+
+static const TXFM_2D_CFG inv_txfm_2d_cfg_dct_dct_64 = {
+ 64, // .txfm_size
+ 12, // .stage_num_col
+ 12, // .stage_num_row
+ inv_shift_dct_dct_64, // .shift
+ inv_stage_range_col_dct_dct_64, // .stage_range_col
+ inv_stage_range_row_dct_dct_64, // .stage_range_row
+ inv_cos_bit_col_dct_dct_64, // .cos_bit_col
+ inv_cos_bit_row_dct_dct_64, // .cos_bit_row
+ vp10_idct64_new, // .txfm_func_col
+ vp10_idct64_new}; // .txfm_func_row;
+
// ---------------- config inv_dct_adst_4 ----------------
static const int8_t inv_shift_dct_adst_4[2] = {1, -5};
static const int8_t inv_stage_range_col_dct_adst_4[4] = {17, 17, 16, 16};
diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c
index ce6317c..d4ac688 100644
--- a/vp10/decoder/decodeframe.c
+++ b/vp10/decoder/decodeframe.c
@@ -215,58 +215,28 @@
}
}
-static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
- const TX_SIZE tx_size,
- uint8_t *dst, int stride,
- int eob, int block) {
+static void inverse_transform_block(MACROBLOCKD* xd, int plane,
+ const TX_TYPE tx_type,
+ const TX_SIZE tx_size,
+ uint8_t *dst, int stride,
+ int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
const int seg_id = xd->mi[0]->mbmi.segment_id;
if (eob > 0) {
tran_low_t *const dqcoeff = pd->dqcoeff;
+ INV_TXFM_PARAM inv_txfm_param;
+ inv_txfm_param.tx_type = tx_type;
+ inv_txfm_param.tx_size = tx_size;
+ inv_txfm_param.eob = eob;
+ inv_txfm_param.lossless = xd->lossless[seg_id];
+
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- switch (tx_size) {
- case TX_4X4:
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
- tx_type, xd->lossless[seg_id]);
- break;
- case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
- }
+ inv_txfm_param.bd = xd->bd;
+ highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
} else {
#endif // CONFIG_VP9_HIGHBITDEPTH
- switch (tx_size) {
- case TX_4X4:
- vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
- xd->lossless[seg_id]);
- break;
- case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
- break;
- case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
- break;
- case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
- }
+ inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -289,75 +259,6 @@
}
}
-static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
- const TX_TYPE tx_type,
- const TX_SIZE tx_size,
- uint8_t *dst, int stride,
- int eob) {
- struct macroblockd_plane *const pd = &xd->plane[plane];
- const int seg_id = xd->mi[0]->mbmi.segment_id;
- if (eob > 0) {
- tran_low_t *const dqcoeff = pd->dqcoeff;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- switch (tx_size) {
- case TX_4X4:
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
- tx_type, xd->lossless[seg_id]);
- break;
- case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
- tx_type);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
- }
- } else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
- switch (tx_size) {
- case TX_4X4:
- vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
- xd->lossless[seg_id]);
- break;
- case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
- break;
- case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
- break;
- case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
- break;
- default:
- assert(0 && "Invalid transform size");
- return;
- }
-#if CONFIG_VP9_HIGHBITDEPTH
- }
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- if (eob == 1) {
- dqcoeff[0] = 0;
- } else {
- if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
- memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
- else if (tx_size == TX_32X32 && eob <= 34)
- memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
- else
- memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
- }
- }
-}
-
static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
#if CONFIG_ANS
const rans_dec_lut *const token_tab,
@@ -393,8 +294,8 @@
#endif // CONFIG_ANS
plane, sc, col, row, tx_size,
r, mbmi->segment_id);
- inverse_transform_block_intra(xd, plane, tx_type, tx_size,
- dst, pd->dst.stride, eob);
+ inverse_transform_block(xd, plane, tx_type, tx_size,
+ dst, pd->dst.stride, eob);
}
}
@@ -429,9 +330,9 @@
const int eob = vp10_decode_block_tokens(xd, plane, sc,
blk_col, blk_row, tx_size,
r, mbmi->segment_id);
- inverse_transform_block_inter(xd, plane, tx_size,
+ inverse_transform_block(xd, plane, tx_type, tx_size,
&pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
- pd->dst.stride, eob, block);
+ pd->dst.stride, eob);
*eob_total += eob;
} else {
int bsl = b_width_log2_lookup[bsize];
@@ -477,9 +378,9 @@
plane, sc, col, row, tx_size, r,
mbmi->segment_id);
- inverse_transform_block_inter(xd, plane, tx_size,
- &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
- pd->dst.stride, eob, block_idx);
+ inverse_transform_block(xd, plane, tx_type, tx_size,
+ &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
+ pd->dst.stride, eob);
return eob;
}
#endif // !CONFIG_VAR_TX || CONFIG_SUPER_TX
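
The three duplicated switch statements collapse into one call through an INV_TXFM_PARAM. A sketch of what the unified helper plausibly looks like, inferred from the call sites and the removed switches above (the real definitions live elsewhere in vp10/common; names and exact signatures are assumptions):

    #include <assert.h>

    typedef struct INV_TXFM_PARAM {
      TX_TYPE tx_type;
      TX_SIZE tx_size;
      int eob;
      int lossless;
      int bd;  // consulted only on the high-bitdepth path
    } INV_TXFM_PARAM;

    static void inv_txfm_add(const tran_low_t *dqcoeff, uint8_t *dst,
                             int stride, INV_TXFM_PARAM *param) {
      switch (param->tx_size) {
        case TX_4X4:
          vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, param->eob,
                                param->tx_type, param->lossless);
          break;
        case TX_8X8:
          vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, param->eob,
                                param->tx_type);
          break;
        case TX_16X16:
          vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, param->eob,
                                  param->tx_type);
          break;
        case TX_32X32:
          vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, param->eob,
                                  param->tx_type);
          break;
        default:
          assert(0 && "Invalid transform size");
          break;
      }
    }
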
@@ -2962,18 +2863,7 @@
assert(tile_rows <= 4);
assert(tile_cols <= (1 << 6));
- // Note: this memset assumes above_context[0], [1] and [2]
- // are allocated as part of the same buffer.
- memset(cm->above_context, 0,
- sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
-
- memset(cm->above_seg_context, 0,
- sizeof(*cm->above_seg_context) * aligned_cols);
-
-#if CONFIG_VAR_TX
- memset(cm->above_txfm_context, 0,
- sizeof(*cm->above_txfm_context) * aligned_cols);
-#endif
+ vp10_zero_above_context(cm, 0, aligned_cols);
get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
@@ -3032,11 +2922,7 @@
tile_cols - tile_col - 1 : tile_col;
tile_data = pbi->tile_data + tile_cols * tile_row + col;
vp10_tile_set_col(&tile, tile_data->cm, col);
- vp10_zero(tile_data->xd.left_context);
- vp10_zero(tile_data->xd.left_seg_context);
-#if CONFIG_VAR_TX
- vp10_zero(tile_data->xd.left_txfm_context_buffer);
-#endif
+ vp10_zero_left_context(&tile_data->xd);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
decode_partition(pbi, &tile_data->xd,
@@ -3126,11 +3012,7 @@
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += MI_BLOCK_SIZE) {
- vp10_zero(tile_data->xd.left_context);
- vp10_zero(tile_data->xd.left_seg_context);
-#if CONFIG_VAR_TX
- vp10_zero(tile_data->xd.left_txfm_context_buffer);
-#endif
+ vp10_zero_left_context(&tile_data->xd);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
decode_partition(tile_data->pbi, &tile_data->xd,
@@ -3211,16 +3093,8 @@
worker->data2 = &pbi->tile_worker_info[n];
}
- // Note: this memset assumes above_context[0], [1] and [2]
- // are allocated as part of the same buffer.
- memset(cm->above_context, 0,
- sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
- memset(cm->above_seg_context, 0,
- sizeof(*cm->above_seg_context) * aligned_mi_cols);
-#if CONFIG_VAR_TX
- memset(cm->above_txfm_context, 0,
- sizeof(*cm->above_txfm_context) * aligned_mi_cols);
-#endif
+ vp10_zero_above_context(cm, 0, aligned_mi_cols);
+
// Load tile data into tile_buffers
get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
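
The scattered context memsets are folded into two helpers whose bodies are implied by the lines they replace (a sketch; the real definitions sit in vp10/common, and the above-context variant presumably also honors a nonzero start column):

    void vp10_zero_left_context(MACROBLOCKD *const xd) {
      vp10_zero(xd->left_context);
      vp10_zero(xd->left_seg_context);
    #if CONFIG_VAR_TX
      vp10_zero(xd->left_txfm_context_buffer);
    #endif
    }

    void vp10_zero_above_context(VP10_COMMON *const cm,
                                 int mi_col_start, int mi_col_end) {
      const int width = mi_col_end - mi_col_start;  // call sites pass start 0
      // above_context[0], [1] and [2] are allocated as one buffer,
      // hence a single memset covers all planes.
      memset(cm->above_context, 0,
             sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * width);
      memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) * width);
    #if CONFIG_VAR_TX
      memset(cm->above_txfm_context, 0,
             sizeof(*cm->above_txfm_context) * width);
    #endif
    }
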
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 1ef2ea5..f20c224 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -1710,10 +1710,7 @@
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += MI_BLOCK_SIZE) {
- vp10_zero(xd->left_seg_context);
-#if CONFIG_VAR_TX
- vp10_zero(xd->left_txfm_context_buffer);
-#endif
+ vp10_zero_left_context(xd);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE)
write_modes_sb(cpi, tile, w, tok, tok_end,
@@ -2190,12 +2187,7 @@
const int tile_rows = 1 << cm->log2_tile_rows;
unsigned int max_tile = 0;
- memset(cm->above_seg_context, 0,
- sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));
-#if CONFIG_VAR_TX
- memset(cm->above_txfm_context, 0,
- sizeof(*cm->above_txfm_context) * mi_cols_aligned_to_sb(cm->mi_cols));
-#endif
+ vp10_zero_above_context(cm, 0, mi_cols_aligned_to_sb(cm->mi_cols));
for (tile_row = 0; tile_row < tile_rows; tile_row++) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
diff --git a/vp10/encoder/context_tree.h b/vp10/encoder/context_tree.h
index 4fa5806..53c7142 100644
--- a/vp10/encoder/context_tree.h
+++ b/vp10/encoder/context_tree.h
@@ -54,7 +54,6 @@
int hybrid_pred_diff;
int comp_pred_diff;
int single_pred_diff;
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
// TODO(jingning) Use RD_COST struct here instead. This involves a broader
// scope of refactoring.
diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c
index c5a68a9..ec00b62 100644
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -1194,9 +1194,6 @@
rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
-
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- rdc->filter_diff[i] += ctx->best_filter_diff[i];
}
for (h = 0; h < y_mis; ++h) {
@@ -1316,9 +1313,6 @@
rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
-
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- rdc->filter_diff[i] += ctx->best_filter_diff[i];
}
for (h = 0; h < y_mis; ++h) {
@@ -1654,6 +1648,9 @@
totalrate_nocoef,
#endif // CONFIG_SUPERTX
bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+ assert(*totalrate_nocoef >= 0);
+#endif // CONFIG_SUPERTX
}
} else {
vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
@@ -1661,6 +1658,9 @@
totalrate_nocoef,
#endif // CONFIG_SUPERTX
bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+ assert(*totalrate_nocoef >= 0);
+#endif // CONFIG_SUPERTX
}
}
@@ -3687,13 +3687,8 @@
SPEED_FEATURES *const sf = &cpi->sf;
int mi_col;
- // Initialize the left context for the new SB row
- memset(&xd->left_context, 0, sizeof(xd->left_context));
- memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
-#if CONFIG_VAR_TX
- memset(xd->left_txfm_context_buffer, 0,
- sizeof(xd->left_txfm_context_buffer));
-#endif
+ vp10_zero_left_context(xd);
+
// Code each SB in the row
for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
@@ -3791,19 +3786,9 @@
// Copy data over into macro block data structures.
vp10_setup_src_planes(x, cpi->Source, 0, 0);
- vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
- // Note: this memset assumes above_context[0], [1] and [2]
- // are allocated as part of the same buffer.
- memset(xd->above_context[0], 0,
- sizeof(*xd->above_context[0]) *
- 2 * aligned_mi_cols * MAX_MB_PLANE);
- memset(xd->above_seg_context, 0,
- sizeof(*xd->above_seg_context) * aligned_mi_cols);
-#if CONFIG_VAR_TX
- memset(cm->above_txfm_context, 0,
- sizeof(*xd->above_txfm_context) * aligned_mi_cols);
-#endif
+ vp10_zero_above_context(cm, 0, aligned_mi_cols);
}
static int check_dual_ref_flags(VP10_COMP *cpi) {
@@ -3971,7 +3956,6 @@
vp10_zero(*td->counts);
vp10_zero(rdc->coef_counts);
vp10_zero(rdc->comp_pred_diff);
- vp10_zero(rdc->filter_diff);
rdc->m_search_count = 0; // Count of motion search hits.
rdc->ex_search_count = 0; // Exhaustive mesh search hits.
@@ -4039,31 +4023,9 @@
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
-
-static INTERP_FILTER get_interp_filter(
- const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
-#if CONFIG_EXT_INTERP
- if (!is_alt_ref &&
- threshes[EIGHTTAP_SMOOTH2] > threshes[EIGHTTAP_SMOOTH] &&
- threshes[EIGHTTAP_SMOOTH2] > threshes[EIGHTTAP_REGULAR] &&
- threshes[EIGHTTAP_SMOOTH2] > threshes[MULTITAP_SHARP] &&
- threshes[EIGHTTAP_SMOOTH2] > threshes[SWITCHABLE - 1]) {
- return EIGHTTAP_SMOOTH2;
- }
-#endif // CONFIG_EXT_INTERP
- if (!is_alt_ref &&
- threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_REGULAR] &&
- threshes[EIGHTTAP_SMOOTH] > threshes[MULTITAP_SHARP] &&
- threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
- return EIGHTTAP_SMOOTH;
- } else if (threshes[MULTITAP_SHARP] > threshes[EIGHTTAP_REGULAR] &&
- threshes[MULTITAP_SHARP] > threshes[SWITCHABLE - 1]) {
- return MULTITAP_SHARP;
- } else if (threshes[EIGHTTAP_REGULAR] > threshes[SWITCHABLE - 1]) {
- return EIGHTTAP_REGULAR;
- } else {
- return SWITCHABLE;
- }
+static INTERP_FILTER get_cm_interp_filter(VP10_COMP *cpi) {
+ (void)cpi;
+ return SWITCHABLE;
}
void vp10_encode_frame(VP10_COMP *cpi) {
@@ -4116,7 +4078,6 @@
// INTRA/ALTREF/GOLDEN/LAST needs to be specified separately.
const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
- int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
const int is_alt_ref = frame_type == ALTREF_FRAME;
/* prediction (compound, single or hybrid) mode selection */
@@ -4134,7 +4095,7 @@
cm->reference_mode = REFERENCE_MODE_SELECT;
if (cm->interp_filter == SWITCHABLE) {
- cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
+ cm->interp_filter = get_cm_interp_filter(cpi);
}
encode_frame_internal(cpi);
@@ -4142,9 +4103,6 @@
for (i = 0; i < REFERENCE_MODES; ++i)
mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
-
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
int single_count_zero = 0;
int comp_count_zero = 0;
diff --git a/vp10/encoder/encoder.h b/vp10/encoder/encoder.h
index afe3292..a319901 100644
--- a/vp10/encoder/encoder.h
+++ b/vp10/encoder/encoder.h
@@ -251,7 +251,6 @@
typedef struct RD_COUNTS {
vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
int64_t comp_pred_diff[REFERENCE_MODES];
- int64_t filter_diff[SWITCHABLE_FILTER_CONTEXTS];
int m_search_count;
int ex_search_count;
} RD_COUNTS;
diff --git a/vp10/encoder/ethread.c b/vp10/encoder/ethread.c
index 6cb9494..c586b9a 100644
--- a/vp10/encoder/ethread.c
+++ b/vp10/encoder/ethread.c
@@ -19,9 +19,6 @@
for (i = 0; i < REFERENCE_MODES; i++)
td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];
-
for (i = 0; i < TX_SIZES; i++)
for (j = 0; j < PLANE_TYPES; j++)
for (k = 0; k < REF_TYPES; k++)
diff --git a/vp10/encoder/mcomp.c b/vp10/encoder/mcomp.c
index 1f147d7..23184ed 100644
--- a/vp10/encoder/mcomp.c
+++ b/vp10/encoder/mcomp.c
@@ -685,47 +685,6 @@
{0, -1}, {0, 1}, {-1, 0}, {1, 0}
};
-#if CONFIG_VP9_HIGHBITDEPTH
-// TODO(yunqing): Optimize the following 2 functions.
-static void highbd_comp_avg_upsampled_pred(uint16_t *comp_pred,
- const uint8_t *pred8,
- int width, int height,
- const uint8_t *ref8,
- int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
-
- uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
- for (i = 0; i < height; ++i) {
- for (j = 0; j < width; ++j) {
- const int tmp = pred[j] + ref[(j << 3)];
- comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
- }
- comp_pred += width;
- pred += width;
- ref += stride;
- }
-}
-
-static void highbd_upsampled_pred(uint16_t *comp_pred,
- int width, int height,
- const uint8_t *ref8,
- int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
-
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
- for (i = 0; i < height; ++i) {
- for (j = 0; j < width; ++j) {
- comp_pred[j] = ref[(j << 3)];
- }
- comp_pred += width;
- ref += stride;
- }
-}
-#endif
-
static int upsampled_pref_error(const MACROBLOCKD *xd,
const vp10_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride,
@@ -737,10 +696,10 @@
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, pred16[64 * 64]);
if (second_pred != NULL)
- highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
- y_stride);
+ vpx_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
+ y_stride);
else
- highbd_upsampled_pred(pred16, w, h, y, y_stride);
+ vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
besterr = vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride,
sse);
diff --git a/vp10/encoder/rd.h b/vp10/encoder/rd.h
index 5a6a44a..61feabe 100644
--- a/vp10/encoder/rd.h
+++ b/vp10/encoder/rd.h
@@ -279,8 +279,6 @@
int64_t prediction_type_threshes[MAX_REF_FRAMES][REFERENCE_MODES];
- int64_t filter_threshes[MAX_REF_FRAMES][SWITCHABLE_FILTER_CONTEXTS];
-
int RDMULT;
int RDDIV;
} RD_OPT;
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 69c3391..01b5abb 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -377,16 +377,35 @@
unsigned int var[16];
double total = 0;
const int f_index = bsize - 6;
+
if (f_index < 0) {
int i, j, index;
int w_shift = bw == 8 ? 1 : 2;
int h_shift = bh == 8 ? 1 : 2;
- for (i = 0; i < bh; ++i)
- for (j = 0; j < bw; ++j) {
- index = (j >> w_shift) + ((i >> h_shift) << 2);
- esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
- (src[j + i * src_stride] - dst[j + i * dst_stride]);
- }
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (cpi->common.use_highbitdepth) {
+ uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+ uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
+ for (i = 0; i < bh; ++i)
+ for (j = 0; j < bw; ++j) {
+ index = (j >> w_shift) + ((i >> h_shift) << 2);
+ esq[index] += (src16[j + i * src_stride] -
+ dst16[j + i * dst_stride]) *
+ (src16[j + i * src_stride] -
+ dst16[j + i * dst_stride]);
+ }
+ } else {
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ for (i = 0; i < bh; ++i)
+ for (j = 0; j < bw; ++j) {
+ index = (j >> w_shift) + ((i >> h_shift) << 2);
+ esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
+ (src[j + i * src_stride] - dst[j + i * dst_stride]);
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
} else {
var[0] = cpi->fn_ptr[f_index].vf(src, src_stride,
dst, dst_stride, &esq[0]);
@@ -2846,57 +2865,21 @@
*bsse += tmp * 16;
if (p->eobs[block] > 0) {
- const int lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+ INV_TXFM_PARAM inv_txfm_param;
+ inv_txfm_param.tx_type = tx_type;
+ inv_txfm_param.tx_size = tx_size;
+ inv_txfm_param.eob = p->eobs[block];
+ inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- const int bd = xd->bd;
- switch (tx_size) {
- case TX_32X32:
- vp10_highbd_inv_txfm_add_32x32(dqcoeff, rec_buffer, 32,
- p->eobs[block], bd, tx_type);
- break;
- case TX_16X16:
- vp10_highbd_inv_txfm_add_16x16(dqcoeff, rec_buffer, 32,
- p->eobs[block], bd, tx_type);
- break;
- case TX_8X8:
- vp10_highbd_inv_txfm_add_8x8(dqcoeff, rec_buffer, 32,
- p->eobs[block], bd, tx_type);
- break;
- case TX_4X4:
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, rec_buffer, 32,
- p->eobs[block], bd, tx_type, lossless);
- break;
- default:
- assert(0 && "Invalid transform size");
- break;
- }
+ inv_txfm_param.bd = xd->bd;
+ highbd_inv_txfm_add(dqcoeff, rec_buffer, 32, &inv_txfm_param);
} else {
-#else
- {
-#endif // CONFIG_VP9_HIGHBITDEPTH
- switch (tx_size) {
- case TX_32X32:
- vp10_inv_txfm_add_32x32(dqcoeff, rec_buffer, 32, p->eobs[block],
- tx_type);
- break;
- case TX_16X16:
- vp10_inv_txfm_add_16x16(dqcoeff, rec_buffer, 32, p->eobs[block],
- tx_type);
- break;
- case TX_8X8:
- vp10_inv_txfm_add_8x8(dqcoeff, rec_buffer, 32, p->eobs[block],
- tx_type);
- break;
- case TX_4X4:
- vp10_inv_txfm_add_4x4(dqcoeff, rec_buffer, 32, p->eobs[block],
- tx_type, lossless);
- break;
- default:
- assert(0 && "Invalid transform size");
- break;
- }
+ inv_txfm_add(dqcoeff, rec_buffer, 32, &inv_txfm_param);
}
+#else // CONFIG_VP9_HIGHBITDEPTH
+ inv_txfm_add(dqcoeff, rec_buffer, 32, &inv_txfm_param);
+#endif // CONFIG_VP9_HIGHBITDEPTH
if ((bh >> 2) + blk_col > max_blocks_wide ||
(bh >> 2) + blk_row > max_blocks_high) {
@@ -5452,7 +5435,6 @@
static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int mode_index,
int64_t comp_pred_diff[REFERENCE_MODES],
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
int skippable) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -5466,9 +5448,6 @@
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
-
- memcpy(ctx->best_filter_diff, best_filter_diff,
- sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
}
static void setup_buffer_inter(
@@ -6036,9 +6015,7 @@
INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
int (*single_skippable)[MAX_REF_FRAMES],
int64_t *psse,
- const int64_t ref_best_rd,
- int64_t *mask_filter,
- int64_t filter_cache[]) {
+ const int64_t ref_best_rd) {
VP10_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -6381,11 +6358,6 @@
if (is_comp_pred)
intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
- // Search for best switchable filter by checking the variance of
- // pred error irrespective of whether the filter will be used
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
-
best_filter = predict_interp_filter(cpi, x, bsize, mi_row, mi_col,
single_filter);
if (cm->interp_filter != BILINEAR && best_filter == SWITCHABLE) {
@@ -6405,12 +6377,8 @@
if (i > 0 && intpel_mv && IsInterpolatingFilter(i)) {
rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
- filter_cache[i] = rd;
- filter_cache[SWITCHABLE_FILTERS] =
- VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
if (cm->interp_filter == SWITCHABLE)
rd += rs_rd;
- *mask_filter = VPXMAX(*mask_filter, rd);
} else {
int rate_sum = 0;
int64_t dist_sum = 0;
@@ -6442,12 +6410,8 @@
&tmp_skip_sb, &tmp_skip_sse);
rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
- filter_cache[i] = rd;
- filter_cache[SWITCHABLE_FILTERS] =
- VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
if (cm->interp_filter == SWITCHABLE)
rd += rs_rd;
- *mask_filter = VPXMAX(*mask_filter, rd);
if (i == 0 && intpel_mv && IsInterpolatingFilter(i)) {
tmp_rate_sum = rate_sum;
@@ -7391,8 +7355,6 @@
int64_t best_rd = best_rd_so_far;
int64_t best_pred_diff[REFERENCE_MODES];
int64_t best_pred_rd[REFERENCE_MODES];
- int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
MB_MODE_INFO best_mbmode;
int best_mode_skippable = 0;
int midx, best_mode_index = -1;
@@ -7430,8 +7392,6 @@
int64_t mode_threshold[MAX_MODES];
int *mode_map = tile_data->mode_map[bsize];
const int mode_search_skip_flags = sf->mode_search_skip_flags;
- int64_t mask_filter = 0;
- int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
int palette_ctx = 0;
const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
@@ -7487,16 +7447,11 @@
sizeof(directional_mode_skip_mask[0]) * INTRA_MODES);
#endif // CONFIG_EXT_INTRA
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
-
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
for (i = 0; i < REFERENCE_MODES; ++i)
best_pred_rd[i] = INT64_MAX;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- best_filter_rd[i] = INT64_MAX;
for (i = 0; i < TX_SIZES; i++)
rate_uv_intra[i] = INT_MAX;
for (i = 0; i < MAX_REF_FRAMES; ++i)
@@ -8081,8 +8036,7 @@
#endif // CONFIG_EXT_INTER
single_inter_filter,
single_skippable,
- &total_sse, best_rd,
- &mask_filter, filter_cache);
+ &total_sse, best_rd);
#if CONFIG_REF_MV
// TODO(jingning): This needs some refactoring to improve code quality
@@ -8092,6 +8046,7 @@
int_mv backup_mv = frame_mv[NEARMV][ref_frame];
int_mv cur_mv = mbmi_ext->ref_mv_stack[ref_frame][2].this_mv;
MB_MODE_INFO backup_mbmi = *mbmi;
+ int backup_skip = x->skip;
int64_t tmp_ref_rd = this_rd;
int ref_idx;
@@ -8124,18 +8079,17 @@
int tmp_rate = 0, tmp_rate_y = 0, tmp_rate_uv = 0;
int tmp_skip = 1;
int64_t tmp_dist = 0, tmp_sse = 0;
+ int dummy_disable_skip = 0;
cur_mv = mbmi_ext->ref_mv_stack[ref_frame][2 + ref_idx].this_mv;
lower_mv_precision(&cur_mv.as_mv, cm->allow_high_precision_mv);
clamp_mv2(&cur_mv.as_mv, xd);
if (!mv_check_bounds(x, &cur_mv.as_mv)) {
- int64_t dummy_filter_cache[SWITCHABLE_FILTER_CONTEXTS];
INTERP_FILTER dummy_single_inter_filter[MB_MODE_COUNT]
[MAX_REF_FRAMES];
int dummy_single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
int dummy_disable_skip = 0;
- int64_t dummy_mask_filter = 0;
#if CONFIG_EXT_INTER
int_mv dummy_single_newmvs[2][MAX_REF_FRAMES] =
{ { { 0 } }, { { 0 } } };
@@ -8168,9 +8122,7 @@
#endif
dummy_single_inter_filter,
dummy_single_skippable,
- &tmp_sse, best_rd,
- &dummy_mask_filter,
- dummy_filter_cache);
+ &tmp_sse, best_rd);
}
tmp_rate += cpi->drl_mode_cost0[drl0_ctx][1];
@@ -8201,6 +8153,7 @@
if (tmp_ref_rd > tmp_alt_rd) {
rate2 = tmp_rate;
+ disable_skip = dummy_disable_skip;
distortion2 = tmp_dist;
skippable = tmp_skip;
rate_y = tmp_rate_y;
@@ -8209,6 +8162,7 @@
this_rd = tmp_alt_rd;
tmp_ref_rd = tmp_alt_rd;
backup_mbmi = *mbmi;
+ backup_skip = x->skip;
#if CONFIG_VAR_TX
for (i = 0; i < MAX_MB_PLANE; ++i)
memcpy(x->blk_skip_drl[i], x->blk_skip[i],
@@ -8216,6 +8170,7 @@
#endif
} else {
*mbmi = backup_mbmi;
+ x->skip = backup_skip;
}
}
@@ -8312,8 +8267,6 @@
if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < REFERENCE_MODES; ++i)
best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
}
// Did this mode help, i.e. is it the new best mode?
@@ -8333,11 +8286,12 @@
rd_cost->rate = rate2;
#if CONFIG_SUPERTX
- *returnrate_nocoef = rate2 - rate_y - rate_uv;
- if (!disable_skip) {
- *returnrate_nocoef -= vp10_cost_bit(vp10_get_skip_prob(cm, xd),
- skippable || this_skip2);
- }
+ if (x->skip && rate_y == INT_MAX)
+ *returnrate_nocoef = rate2;
+ else
+ *returnrate_nocoef = rate2 - rate_y - rate_uv;
+ *returnrate_nocoef -= vp10_cost_bit(vp10_get_skip_prob(cm, xd),
+ disable_skip || skippable || this_skip2);
*returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
mbmi->ref_frame[0] != INTRA_FRAME);
#if CONFIG_OBMC
@@ -8412,29 +8366,6 @@
}
if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
-
- /* keep record of best filter type */
- if (!mode_excluded && cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
-
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
- int64_t adj_rd;
- if (ref == INT64_MAX)
- adj_rd = 0;
- else if (filter_cache[i] == INT64_MAX)
- // when early termination is triggered, the encoder does not have
- // access to the rate-distortion cost. it only knows that the cost
- // should be above the maximum valid value. hence it takes the known
- // maximum plus an arbitrary constant as the rate-distortion cost.
- adj_rd = mask_filter - ref + 10;
- else
- adj_rd = filter_cache[i] - ref;
-
- adj_rd += this_rd;
- best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
- }
- }
}
if (early_term)
@@ -8729,21 +8660,6 @@
best_pred_diff[i] = best_rd - best_pred_rd[i];
}
- if (!x->skip) {
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
- if (best_filter_rd[i] == INT64_MAX)
- best_filter_diff[i] = 0;
- else
- best_filter_diff[i] = best_rd - best_filter_rd[i];
- }
- if (cm->interp_filter == SWITCHABLE)
- assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
- } else {
- vp10_zero(best_filter_diff);
- }
-
- // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
- // updating code causes PSNR loss. Need to figure out the confliction.
x->skip |= best_mode_skippable;
if (!x->skip && !x->select_tx_size) {
@@ -8767,7 +8683,7 @@
assert(best_mode_index >= 0);
store_coding_context(x, ctx, best_mode_index, best_pred_diff,
- best_filter_diff, best_mode_skippable);
+ best_mode_skippable);
if (cm->allow_screen_content_tools && pmi->palette_size[1] > 0) {
restore_uv_color_map(cpi, x);
@@ -8788,7 +8704,6 @@
const int comp_pred = 0;
int i;
int64_t best_pred_diff[REFERENCE_MODES];
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
vpx_prob comp_mode_p;
INTERP_FILTER best_filter = SWITCHABLE;
@@ -8873,12 +8788,11 @@
cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
vp10_zero(best_pred_diff);
- vp10_zero(best_filter_diff);
if (!x->select_tx_size)
swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
store_coding_context(x, ctx, THR_ZEROMV,
- best_pred_diff, best_filter_diff, 0);
+ best_pred_diff, 0);
}
void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
@@ -8918,8 +8832,6 @@
int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
int64_t best_pred_diff[REFERENCE_MODES];
int64_t best_pred_rd[REFERENCE_MODES];
- int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
MB_MODE_INFO best_mbmode;
int ref_index, best_ref_index = 0;
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
@@ -8939,8 +8851,6 @@
b_mode_info best_bmodes[4];
int best_skip2 = 0;
int ref_frame_skip_mask[2] = { 0 };
- int64_t mask_filter = 0;
- int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
int internal_active_edge =
vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
@@ -8964,9 +8874,6 @@
mbmi->use_wedge_interintra = 0;
#endif // CONFIG_EXT_INTER
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
-
for (i = 0; i < 4; i++) {
int j;
#if CONFIG_EXT_INTER
@@ -8986,8 +8893,6 @@
for (i = 0; i < REFERENCE_MODES; ++i)
best_pred_rd[i] = INT64_MAX;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- best_filter_rd[i] = INT64_MAX;
rate_uv_intra = INT_MAX;
rd_cost->rate = INT_MAX;
@@ -9245,8 +9150,6 @@
#endif // CONFIG_EXT_REFS
this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
// TODO(any): Add search of the tx_type to improve rd performance at the
// expense of speed.
@@ -9290,14 +9193,9 @@
continue;
rs = vp10_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
- filter_cache[switchable_filter_index] = tmp_rd;
- filter_cache[SWITCHABLE_FILTERS] =
- VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
if (cm->interp_filter == SWITCHABLE)
tmp_rd += rs_rd;
- mask_filter = VPXMAX(mask_filter, tmp_rd);
-
newbest = (tmp_rd < tmp_best_rd);
if (newbest) {
tmp_best_filter = mbmi->interp_filter;
@@ -9469,8 +9367,6 @@
if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < REFERENCE_MODES; ++i)
best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
}
// Did this mode help.. i.e. is it the new best mode
@@ -9565,29 +9461,6 @@
best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
}
- /* keep record of best filter type */
- if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
- cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
- int64_t adj_rd;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
- if (ref == INT64_MAX)
- adj_rd = 0;
- else if (filter_cache[i] == INT64_MAX)
- // when early termination is triggered, the encoder does not have
- // access to the rate-distortion cost. it only knows that the cost
- // should be above the maximum valid value. hence it takes the known
- // maximum plus an arbitrary constant as the rate-distortion cost.
- adj_rd = mask_filter - ref + 10;
- else
- adj_rd = filter_cache[i] - ref;
-
- adj_rd += this_rd;
- best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
- }
- }
-
if (early_term)
break;
@@ -9659,21 +9532,8 @@
best_pred_diff[i] = best_rd - best_pred_rd[i];
}
- if (!x->skip) {
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
- if (best_filter_rd[i] == INT64_MAX)
- best_filter_diff[i] = 0;
- else
- best_filter_diff[i] = best_rd - best_filter_rd[i];
- }
- if (cm->interp_filter == SWITCHABLE)
- assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
- } else {
- vp10_zero(best_filter_diff);
- }
-
store_coding_context(x, ctx, best_ref_index,
- best_pred_diff, best_filter_diff, 0);
+ best_pred_diff, 0);
}
#if CONFIG_OBMC
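Note: the rdopt.c hunks above delete the interpolation-filter RD bookkeeping (filter_cache, mask_filter, best_filter_rd, best_filter_diff) from both vp10_rd_pick_inter_mode_sb and vp10_rd_pick_inter_mode_sub8x8, and drop the matching best_filter_diff argument from store_coding_context. For the record, this is the adjustment the deleted code computed, reassembled from the removed lines as a minimal sketch (names as in the deleted code; not a proposal to keep it):

#include <stdint.h>

/* For each switchable-filter context, bias this mode's RD cost by the gap
 * between that filter's cached cost and the cost of the filter in use. */
static void update_best_filter_rd(
    int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS],
    const int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS],
    int64_t mask_filter, int64_t ref, int64_t this_rd) {
  int i;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
    int64_t adj_rd;
    if (ref == INT64_MAX) {
      adj_rd = 0;
    } else if (filter_cache[i] == INT64_MAX) {
      /* Early termination: the true cost is unknown, only that it exceeds
       * the maximum seen; use that maximum plus an arbitrary margin. */
      adj_rd = mask_filter - ref + 10;
    } else {
      adj_rd = filter_cache[i] - ref;
    }
    best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd + this_rd);
  }
}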
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index ee1e305..24f42df 100644
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -651,6 +651,44 @@
ref += ref_stride;
}
}
+
+void vpx_highbd_upsampled_pred_c(uint16_t *comp_pred,
+ int width, int height,
+ const uint8_t *ref8,
+ int ref_stride) {
+ int i, j;
+ int stride = ref_stride << 3;
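+ // Integer-pel samples sit in every 8th column of every 8th row of the
+ // 8x-upsampled reference, hence the << 3 on both the index and the stride.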
+
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ comp_pred[j] = ref[(j << 3)];
+ }
+ comp_pred += width;
+ ref += stride;
+ }
+}
+
+void vpx_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
+ const uint8_t *pred8,
+ int width, int height,
+ const uint8_t *ref8,
+ int ref_stride) {
+ int i, j;
+ int stride = ref_stride << 3;
+
+ uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ for (i = 0; i < height; ++i) {
+ for (j = 0; j < width; ++j) {
+ const int tmp = pred[j] + ref[(j << 3)];
+ comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
+ }
+ comp_pred += width;
+ pred += width;
+ ref += stride;
+ }
+}
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP10 && CONFIG_EXT_INTER
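Note: the two new C routines mirror the existing vpx_upsampled_pred_c / vpx_comp_avg_upsampled_pred_c pair for high bit depth: they read back integer-pel samples from a reference plane that is upsampled 8x in each dimension, which is why both the column index and the row step are shifted left by 3. A scalar model of the indexing (sizes and fill pattern are invented for the demonstration):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  enum { W = 4, H = 2, UP_STRIDE = W * 8 };  /* stride of the 8x plane */
  uint16_t ref[H * 8 * UP_STRIDE], out[W * H];
  int i, j;
  for (i = 0; i < H * 8 * UP_STRIDE; ++i) ref[i] = (uint16_t)i;
  /* Pick every 8th sample of every 8th row, as the new functions do. */
  for (i = 0; i < H; ++i)
    for (j = 0; j < W; ++j)
      out[i * W + j] = ref[(i * 8) * UP_STRIDE + (j * 8)];
  printf("%d %d %d\n", out[0], out[1], out[W]);  /* prints: 0 8 256 */
  return 0;
}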
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index e5c002a..ced7009 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -1238,6 +1238,13 @@
add_proto qw/void vpx_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
specialize qw/vpx_comp_avg_upsampled_pred sse2/;
+if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
+ add_proto qw/void vpx_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
+ specialize qw/vpx_highbd_upsampled_pred sse2/;
+ add_proto qw/void vpx_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
+ specialize qw/vpx_highbd_comp_avg_upsampled_pred sse2/;
+}
+
#
# ...
#
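Note: the new prototypes are fenced by CONFIG_VP9_HIGHBITDEPTH so non-HBD builds never see the symbols. From each add_proto/specialize pair the RTCD generator emits, roughly, a per-function dispatch pointer that CPU detection retargets at runtime; the sketch below only illustrates that shape (the actual generated names and setup hook live in the emitted vpx_dsp_rtcd.h, not here):

#include <stdint.h>

void vpx_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
                                 const uint8_t *ref8, int ref_stride);
void vpx_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
                                    const uint8_t *ref8, int ref_stride);

/* Runtime-dispatch shape: default to C, upgrade when SSE2 is detected. */
static void (*vpx_highbd_upsampled_pred_ptr)(uint16_t *, int, int,
                                             const uint8_t *, int) =
    vpx_highbd_upsampled_pred_c;

static void setup_rtcd_sketch(int have_sse2) {
  if (have_sse2)
    vpx_highbd_upsampled_pred_ptr = vpx_highbd_upsampled_pred_sse2;
}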
diff --git a/vpx_dsp/x86/highbd_variance_sse2.c b/vpx_dsp/x86/highbd_variance_sse2.c
index 81ec5db..e2b79bf 100644
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -7,7 +7,11 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+
+#include <emmintrin.h> // SSE2
+
#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
@@ -591,3 +595,136 @@
#undef FNS
#undef FN
#endif // CONFIG_USE_X86INC
+
+void vpx_highbd_upsampled_pred_sse2(uint16_t *comp_pred,
+ int width, int height,
+ const uint8_t *ref8,
+ int ref_stride) {
+ int i, j;
+ int stride = ref_stride << 3;
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+
+ if (width >= 8) {
+ // Gather eight output samples per iteration (every 8th pixel of the row).
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j += 8) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
+ __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
+ __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
+ __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
+ __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
+ __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
+ __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
+ __m128i t0, t1, t2, t3;
+
+ t0 = _mm_unpacklo_epi16(s0, s1);
+ t1 = _mm_unpacklo_epi16(s2, s3);
+ t2 = _mm_unpacklo_epi16(s4, s5);
+ t3 = _mm_unpacklo_epi16(s6, s7);
+ t0 = _mm_unpacklo_epi32(t0, t1);
+ t2 = _mm_unpacklo_epi32(t2, t3);
+ t0 = _mm_unpacklo_epi64(t0, t2);
+
+ _mm_storeu_si128((__m128i *)(comp_pred), t0);
+ comp_pred += 8;
+ ref += 8 * 8;
+ }
+ ref += stride - (width << 3);
+ }
+ } else {
+ // Gather four output samples per iteration (every 8th pixel of the row).
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j += 4) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
+ __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
+ __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
+ __m128i t0, t1;
+
+ t0 = _mm_unpacklo_epi16(s0, s1);
+ t1 = _mm_unpacklo_epi16(s2, s3);
+ t0 = _mm_unpacklo_epi32(t0, t1);
+
+ _mm_storel_epi64((__m128i *)(comp_pred), t0);
+ comp_pred += 4;
+ ref += 4 * 8;
+ }
+ ref += stride - (width << 3);
+ }
+ }
+}
+
+void vpx_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
+ const uint8_t *pred8,
+ int width, int height,
+ const uint8_t *ref8,
+ int ref_stride) {
+ const __m128i one = _mm_set1_epi16(1);
+ int i, j;
+ int stride = ref_stride << 3;
+ uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+
+ if (width >= 8) {
+ // Gather eight output samples per iteration (every 8th pixel of the row).
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j += 8) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
+ __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
+ __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
+ __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
+ __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
+ __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
+ __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
+ __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
+ __m128i t0, t1, t2, t3;
+
+ t0 = _mm_unpacklo_epi16(s0, s1);
+ t1 = _mm_unpacklo_epi16(s2, s3);
+ t2 = _mm_unpacklo_epi16(s4, s5);
+ t3 = _mm_unpacklo_epi16(s6, s7);
+ t0 = _mm_unpacklo_epi32(t0, t1);
+ t2 = _mm_unpacklo_epi32(t2, t3);
+ t0 = _mm_unpacklo_epi64(t0, t2);
+
+ p0 = _mm_adds_epu16(t0, p0);
+ p0 = _mm_adds_epu16(p0, one);
+ p0 = _mm_srli_epi16(p0, 1);
+
+ _mm_storeu_si128((__m128i *)(comp_pred), p0);
+ comp_pred += 8;
+ pred += 8;
+ ref += 8 * 8;
+ }
+ ref += stride - (width << 3);
+ }
+ } else {
+ // Gather four output samples per iteration (every 8th pixel of the row).
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j += 4) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
+ __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
+ __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
+ __m128i p0 = _mm_loadl_epi64((const __m128i *)pred);
+ __m128i t0, t1;
+
+ t0 = _mm_unpacklo_epi16(s0, s1);
+ t1 = _mm_unpacklo_epi16(s2, s3);
+ t0 = _mm_unpacklo_epi32(t0, t1);
+
+ p0 = _mm_adds_epu16(t0, p0);
+ p0 = _mm_adds_epu16(p0, one);
+ p0 = _mm_srli_epi16(p0, 1);
+
+ _mm_storel_epi64((__m128i *)(comp_pred), p0);
+ comp_pred += 4;
+ pred += 4;
+ ref += 4 * 8;
+ }
+ ref += stride - (width << 3);
+ }
+ }
+}
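Note: both SSE2 kernels gather every 8th uint16_t with a three-level unpack ladder instead of eight scalar loads per output vector. Each _mm_cvtsi32_si128 brings in one wanted sample plus its (unwanted) neighbor; the epi16/epi32/epi64 unpacks route the wanted lanes into the low half and the leftovers into lanes that the final unpack discards. Factored out as a standalone helper with the lane trace spelled out:

#include <emmintrin.h>  // SSE2
#include <stdint.h>

/* Collect r[0], r[8], ..., r[56] into one __m128i.
 * Lane traces, low lane first (rN = r[N]):
 *   t0 = unpacklo_epi16(s0, s1)  -> r0  r8  r1  r9  0   0   0   0
 *   t0 = unpacklo_epi32(t0, t1)  -> r0  r8  r16 r24 r1  r9  r17 r25
 *   t0 = unpacklo_epi64(t0, t2)  -> r0  r8  r16 r24 r32 r40 r48 r56 */
static __m128i gather_every_8th_u16(const uint16_t *r) {
  const __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 0));
  const __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 8));
  const __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 16));
  const __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 24));
  const __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 32));
  const __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 40));
  const __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 48));
  const __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(r + 56));
  __m128i t0 = _mm_unpacklo_epi16(s0, s1);
  __m128i t1 = _mm_unpacklo_epi16(s2, s3);
  __m128i t2 = _mm_unpacklo_epi16(s4, s5);
  const __m128i t3 = _mm_unpacklo_epi16(s6, s7);
  t0 = _mm_unpacklo_epi32(t0, t1);
  t2 = _mm_unpacklo_epi32(t2, t3);
  return _mm_unpacklo_epi64(t0, t2);
}

In the averaging path, the adds_epu16-with-one plus srli_epi16-by-1 sequence computes (t0 + p0 + 1) >> 1, matching ROUND_POWER_OF_TWO(tmp, 1) in the C version for in-range pixel values; _mm_avg_epu16 would give the same rounded average in a single instruction and without the saturating-add caveat, at the cost of diverging from the C code's spelling.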
diff --git a/vpx_dsp/x86/variance_sse2.c b/vpx_dsp/x86/variance_sse2.c
index 63fc1e6..dc51173 100644
--- a/vpx_dsp/x86/variance_sse2.c
+++ b/vpx_dsp/x86/variance_sse2.c
@@ -509,12 +509,11 @@
s2 = _mm_unpacklo_epi8(t1, s3);
s4 = _mm_unpacklo_epi8(t2, s5);
s6 = _mm_unpacklo_epi8(t3, s7);
+ s0 = _mm_unpacklo_epi32(s0, s2);
+ s4 = _mm_unpacklo_epi32(s4, s6);
+ s0 = _mm_unpacklo_epi64(s0, s4);
- *(int *)comp_pred = _mm_cvtsi128_si32(s0);
- *(int *)(comp_pred + 4) = _mm_cvtsi128_si32(s2);
- *(int *)(comp_pred + 8) = _mm_cvtsi128_si32(s4);
- *(int *)(comp_pred + 12) = _mm_cvtsi128_si32(s6);
-
+ _mm_storeu_si128((__m128i *)(comp_pred), s0);
comp_pred += 16;
ref += 16 * 8;
}
@@ -537,9 +536,9 @@
s0 = _mm_unpacklo_epi8(t0, s1);
s2 = _mm_unpacklo_epi8(t1, s3);
+ s0 = _mm_unpacklo_epi32(s0, s2);
- *(int *)comp_pred = _mm_cvtsi128_si32(s0);
- *(int *)(comp_pred + 4) = _mm_cvtsi128_si32(s2);
+ _mm_storel_epi64((__m128i *)(comp_pred), s0);
comp_pred += 8;
ref += 8 * 8;
}
@@ -558,7 +557,6 @@
s0 = _mm_unpacklo_epi8(t0, s1);
*(int *)comp_pred = _mm_cvtsi128_si32(s0);
-
comp_pred += 4;
ref += 4 * 8;
}
@@ -621,14 +619,7 @@
p1 = _mm_srli_epi16(p1, 1);
p0 = _mm_packus_epi16(p0, p1);
- *(int *)comp_pred = _mm_cvtsi128_si32(p0);
- p0 = _mm_srli_si128(p0, 4);
- *(int *)(comp_pred + 4) = _mm_cvtsi128_si32(p0);
- p0 = _mm_srli_si128(p0, 4);
- *(int *)(comp_pred + 8) = _mm_cvtsi128_si32(p0);
- p0 = _mm_srli_si128(p0, 4);
- *(int *)(comp_pred + 12) = _mm_cvtsi128_si32(p0);
-
+ _mm_storeu_si128((__m128i *)(comp_pred), p0);
comp_pred += 16;
pred += 16;
ref += 16 * 8;
@@ -662,10 +653,7 @@
p0 = _mm_srli_epi16(p0, 1);
p0 = _mm_packus_epi16(p0, zero);
- *(int *)comp_pred = _mm_cvtsi128_si32(p0);
- p0 = _mm_srli_si128(p0, 4);
- *(int *)(comp_pred + 4) = _mm_cvtsi128_si32(p0);
-
+ _mm_storel_epi64((__m128i *)(comp_pred), p0);
comp_pred += 8;
pred += 8;
ref += 8 * 8;
@@ -693,7 +681,6 @@
p0 = _mm_packus_epi16(p0, zero);
*(int *)comp_pred = _mm_cvtsi128_si32(p0);
-
comp_pred += 4;
pred += 4;
ref += 4 * 8;
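Note: the variance_sse2.c hunks are pure store consolidation in the existing 8-bit kernels: per-iteration results are now interleaved up to a full register (unpacklo_epi32 / unpacklo_epi64) so a single _mm_storeu_si128, or _mm_storel_epi64 in the 8-byte cases, replaces a chain of _mm_cvtsi128_si32 spills; the bytes written are identical. Side-by-side shape of the 16-byte case:

#include <emmintrin.h>  // SSE2
#include <stdint.h>

/* Old shape: spill one register 4 bytes at a time, shifting between spills. */
static void store16_piecewise(uint8_t *dst, __m128i p0) {
  *(int *)dst = _mm_cvtsi128_si32(p0);
  p0 = _mm_srli_si128(p0, 4);
  *(int *)(dst + 4) = _mm_cvtsi128_si32(p0);
  p0 = _mm_srli_si128(p0, 4);
  *(int *)(dst + 8) = _mm_cvtsi128_si32(p0);
  p0 = _mm_srli_si128(p0, 4);
  *(int *)(dst + 12) = _mm_cvtsi128_si32(p0);
}

/* New shape: one unaligned 128-bit store writes the same 16 bytes. */
static void store16_single(uint8_t *dst, __m128i p0) {
  _mm_storeu_si128((__m128i *)dst, p0);
}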