Swap new size 8 and 16 fwd/inv transforms for ADST
This makes them consistent with their VP9 counterparts.
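
The 8- and 16-point ADST networks previously used by the inverse
transform are now used by the forward transform, and vice versa: the
stage-1 input permutations, the per-stage butterfly coefficients and
the final output signs trade places between av1_fwd_txfm1d.c and
av1_inv_txfm1d.c, and the SSE4.1 iadst kernels are rewritten to match.

Every stage is built from the same rounded butterfly,
(w0 * in0 + w1 * in1 + 2^(bit-1)) >> bit. For reference, the helpers
look roughly like this (the actual definitions live in av1_txfm.h and
highbd_txfm_utility_sse4.h):

    // Scalar butterfly used throughout av1_{fwd,inv}_txfm1d.c.
    static INLINE int32_t half_btf(int32_t w0, int32_t in0, int32_t w1,
                                   int32_t in1, int bit) {
      const int32_t result = w0 * in0 + w1 * in1;
      return (result + (1 << (bit - 1))) >> bit;
    }

    // Four-lane SSE4.1 equivalent; the new iadst8x8/iadst16x16 code
    // inlines this mullo/add/add/srai sequence instead of calling it.
    static INLINE __m128i half_btf_sse4_1(const __m128i *w0, const __m128i *n0,
                                          const __m128i *w1, const __m128i *n1,
                                          const __m128i *rounding, int bit) {
      __m128i x = _mm_mullo_epi32(*w0, *n0);
      __m128i y = _mm_mullo_epi32(*w1, *n1);
      x = _mm_add_epi32(x, y);
      x = _mm_add_epi32(x, *rounding);
      return _mm_srai_epi32(x, bit);
    }
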
Change-Id: Iaebf625f2dce4f159b8a8615f37003d773ee6450
diff --git a/av1/common/av1_inv_txfm1d.c b/av1/common/av1_inv_txfm1d.c
index d48ca6f..f19a39a 100644
--- a/av1/common/av1_inv_txfm1d.c
+++ b/av1/common/av1_inv_txfm1d.c
@@ -812,16 +812,15 @@
// stage 1;
stage++;
- assert(output != input);
bf1 = output;
- bf1[0] = input[0];
- bf1[1] = -input[7];
- bf1[2] = -input[3];
- bf1[3] = input[4];
- bf1[4] = -input[1];
- bf1[5] = input[6];
- bf1[6] = input[2];
- bf1[7] = -input[5];
+ bf1[0] = input[7];
+ bf1[1] = input[0];
+ bf1[2] = input[5];
+ bf1[3] = input[2];
+ bf1[4] = input[3];
+ bf1[5] = input[4];
+ bf1[6] = input[1];
+ bf1[7] = input[6];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 2
@@ -829,28 +828,28 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit[stage]);
- bf1[4] = bf0[4];
- bf1[5] = bf0[5];
- bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit[stage]);
+ bf1[0] = half_btf(cospi[4], bf0[0], cospi[60], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(-cospi[4], bf0[1], cospi[60], bf0[0], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[20], bf0[2], cospi[44], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(-cospi[20], bf0[3], cospi[44], bf0[2], cos_bit[stage]);
+ bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(-cospi[36], bf0[5], cospi[28], bf0[4], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(-cospi[52], bf0[7], cospi[12], bf0[6], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[2];
- bf1[1] = bf0[1] + bf0[3];
- bf1[2] = bf0[0] - bf0[2];
- bf1[3] = bf0[1] - bf0[3];
- bf1[4] = bf0[4] + bf0[6];
- bf1[5] = bf0[5] + bf0[7];
- bf1[6] = bf0[4] - bf0[6];
- bf1[7] = bf0[5] - bf0[7];
+ bf1[0] = bf0[0] + bf0[4];
+ bf1[1] = bf0[1] + bf0[5];
+ bf1[2] = bf0[2] + bf0[6];
+ bf1[3] = bf0[3] + bf0[7];
+ bf1[4] = -bf0[4] + bf0[0];
+ bf1[5] = -bf0[5] + bf0[1];
+ bf1[6] = -bf0[6] + bf0[2];
+ bf1[7] = -bf0[7] + bf0[3];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 4
@@ -863,23 +862,23 @@
bf1[2] = bf0[2];
bf1[3] = bf0[3];
bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(-cospi[16], bf0[5], cospi[48], bf0[4], cos_bit[stage]);
bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[48], bf0[7], cospi[16], bf0[6], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 5
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[4];
- bf1[1] = bf0[1] + bf0[5];
- bf1[2] = bf0[2] + bf0[6];
- bf1[3] = bf0[3] + bf0[7];
- bf1[4] = bf0[0] - bf0[4];
- bf1[5] = bf0[1] - bf0[5];
- bf1[6] = bf0[2] - bf0[6];
- bf1[7] = bf0[3] - bf0[7];
+ bf1[0] = bf0[0] + bf0[2];
+ bf1[1] = bf0[1] + bf0[3];
+ bf1[2] = -bf0[2] + bf0[0];
+ bf1[3] = -bf0[3] + bf0[1];
+ bf1[4] = bf0[4] + bf0[6];
+ bf1[5] = bf0[5] + bf0[7];
+ bf1[6] = -bf0[6] + bf0[4];
+ bf1[7] = -bf0[7] + bf0[5];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 6
@@ -887,28 +886,28 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = half_btf(cospi[4], bf0[0], cospi[60], bf0[1], cos_bit[stage]);
- bf1[1] = half_btf(cospi[60], bf0[0], -cospi[4], bf0[1], cos_bit[stage]);
- bf1[2] = half_btf(cospi[20], bf0[2], cospi[44], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(cospi[44], bf0[2], -cospi[20], bf0[3], cos_bit[stage]);
- bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(cospi[28], bf0[4], -cospi[36], bf0[5], cos_bit[stage]);
- bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[12], bf0[6], -cospi[52], bf0[7], cos_bit[stage]);
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(-cospi[32], bf0[3], cospi[32], bf0[2], cos_bit[stage]);
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(-cospi[32], bf0[7], cospi[32], bf0[6], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 7
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[1];
- bf1[1] = bf0[6];
- bf1[2] = bf0[3];
- bf1[3] = bf0[4];
- bf1[4] = bf0[5];
- bf1[5] = bf0[2];
- bf1[6] = bf0[7];
- bf1[7] = bf0[0];
+ bf1[0] = bf0[0];
+ bf1[1] = -bf0[4];
+ bf1[2] = bf0[6];
+ bf1[3] = -bf0[2];
+ bf1[4] = bf0[3];
+ bf1[5] = -bf0[7];
+ bf1[6] = bf0[5];
+ bf1[7] = -bf0[1];
range_check(stage, input, bf1, size, stage_range[stage]);
}
@@ -926,24 +925,23 @@
// stage 1;
stage++;
- assert(output != input);
bf1 = output;
- bf1[0] = input[0];
- bf1[1] = -input[15];
- bf1[2] = -input[7];
- bf1[3] = input[8];
- bf1[4] = -input[3];
- bf1[5] = input[12];
- bf1[6] = input[4];
- bf1[7] = -input[11];
- bf1[8] = -input[1];
- bf1[9] = input[14];
- bf1[10] = input[6];
- bf1[11] = -input[9];
- bf1[12] = input[2];
- bf1[13] = -input[13];
- bf1[14] = -input[5];
- bf1[15] = input[10];
+ bf1[0] = input[15];
+ bf1[1] = input[0];
+ bf1[2] = input[13];
+ bf1[3] = input[2];
+ bf1[4] = input[11];
+ bf1[5] = input[4];
+ bf1[6] = input[9];
+ bf1[7] = input[6];
+ bf1[8] = input[7];
+ bf1[9] = input[8];
+ bf1[10] = input[5];
+ bf1[11] = input[10];
+ bf1[12] = input[3];
+ bf1[13] = input[12];
+ bf1[14] = input[1];
+ bf1[15] = input[14];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 2
@@ -951,44 +949,44 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit[stage]);
- bf1[4] = bf0[4];
- bf1[5] = bf0[5];
- bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit[stage]);
- bf1[8] = bf0[8];
- bf1[9] = bf0[9];
- bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit[stage]);
- bf1[12] = bf0[12];
- bf1[13] = bf0[13];
- bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit[stage]);
+ bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(-cospi[2], bf0[1], cospi[62], bf0[0], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(-cospi[10], bf0[3], cospi[54], bf0[2], cos_bit[stage]);
+ bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(-cospi[18], bf0[5], cospi[46], bf0[4], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(-cospi[26], bf0[7], cospi[38], bf0[6], cos_bit[stage]);
+ bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit[stage]);
+ bf1[9] = half_btf(-cospi[34], bf0[9], cospi[30], bf0[8], cos_bit[stage]);
+ bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit[stage]);
+ bf1[11] = half_btf(-cospi[42], bf0[11], cospi[22], bf0[10], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit[stage]);
+ bf1[13] = half_btf(-cospi[50], bf0[13], cospi[14], bf0[12], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(-cospi[58], bf0[15], cospi[6], bf0[14], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[2];
- bf1[1] = bf0[1] + bf0[3];
- bf1[2] = bf0[0] - bf0[2];
- bf1[3] = bf0[1] - bf0[3];
- bf1[4] = bf0[4] + bf0[6];
- bf1[5] = bf0[5] + bf0[7];
- bf1[6] = bf0[4] - bf0[6];
- bf1[7] = bf0[5] - bf0[7];
- bf1[8] = bf0[8] + bf0[10];
- bf1[9] = bf0[9] + bf0[11];
- bf1[10] = bf0[8] - bf0[10];
- bf1[11] = bf0[9] - bf0[11];
- bf1[12] = bf0[12] + bf0[14];
- bf1[13] = bf0[13] + bf0[15];
- bf1[14] = bf0[12] - bf0[14];
- bf1[15] = bf0[13] - bf0[15];
+ bf1[0] = bf0[0] + bf0[8];
+ bf1[1] = bf0[1] + bf0[9];
+ bf1[2] = bf0[2] + bf0[10];
+ bf1[3] = bf0[3] + bf0[11];
+ bf1[4] = bf0[4] + bf0[12];
+ bf1[5] = bf0[5] + bf0[13];
+ bf1[6] = bf0[6] + bf0[14];
+ bf1[7] = bf0[7] + bf0[15];
+ bf1[8] = -bf0[8] + bf0[0];
+ bf1[9] = -bf0[9] + bf0[1];
+ bf1[10] = -bf0[10] + bf0[2];
+ bf1[11] = -bf0[11] + bf0[3];
+ bf1[12] = -bf0[12] + bf0[4];
+ bf1[13] = -bf0[13] + bf0[5];
+ bf1[14] = -bf0[14] + bf0[6];
+ bf1[15] = -bf0[15] + bf0[7];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 4
@@ -1000,85 +998,85 @@
bf1[1] = bf0[1];
bf1[2] = bf0[2];
bf1[3] = bf0[3];
- bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit[stage]);
- bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit[stage]);
- bf1[8] = bf0[8];
- bf1[9] = bf0[9];
- bf1[10] = bf0[10];
- bf1[11] = bf0[11];
- bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit[stage]);
- bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit[stage]);
- range_check(stage, input, bf1, size, stage_range[stage]);
-
- // stage 5
- stage++;
- bf0 = step;
- bf1 = output;
- bf1[0] = bf0[0] + bf0[4];
- bf1[1] = bf0[1] + bf0[5];
- bf1[2] = bf0[2] + bf0[6];
- bf1[3] = bf0[3] + bf0[7];
- bf1[4] = bf0[0] - bf0[4];
- bf1[5] = bf0[1] - bf0[5];
- bf1[6] = bf0[2] - bf0[6];
- bf1[7] = bf0[3] - bf0[7];
- bf1[8] = bf0[8] + bf0[12];
- bf1[9] = bf0[9] + bf0[13];
- bf1[10] = bf0[10] + bf0[14];
- bf1[11] = bf0[11] + bf0[15];
- bf1[12] = bf0[8] - bf0[12];
- bf1[13] = bf0[9] - bf0[13];
- bf1[14] = bf0[10] - bf0[14];
- bf1[15] = bf0[11] - bf0[15];
- range_check(stage, input, bf1, size, stage_range[stage]);
-
- // stage 6
- stage++;
- cospi = cospi_arr(cos_bit[stage]);
- bf0 = output;
- bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = bf0[2];
- bf1[3] = bf0[3];
bf1[4] = bf0[4];
bf1[5] = bf0[5];
bf1[6] = bf0[6];
bf1[7] = bf0[7];
bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit[stage]);
- bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit[stage]);
+ bf1[9] = half_btf(-cospi[8], bf0[9], cospi[56], bf0[8], cos_bit[stage]);
bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit[stage]);
+ bf1[11] = half_btf(-cospi[40], bf0[11], cospi[24], bf0[10], cos_bit[stage]);
bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[56], bf0[13], cospi[8], bf0[12], cos_bit[stage]);
bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[24], bf0[15], cospi[40], bf0[14], cos_bit[stage]);
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 5
+ stage++;
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[4];
+ bf1[1] = bf0[1] + bf0[5];
+ bf1[2] = bf0[2] + bf0[6];
+ bf1[3] = bf0[3] + bf0[7];
+ bf1[4] = -bf0[4] + bf0[0];
+ bf1[5] = -bf0[5] + bf0[1];
+ bf1[6] = -bf0[6] + bf0[2];
+ bf1[7] = -bf0[7] + bf0[3];
+ bf1[8] = bf0[8] + bf0[12];
+ bf1[9] = bf0[9] + bf0[13];
+ bf1[10] = bf0[10] + bf0[14];
+ bf1[11] = bf0[11] + bf0[15];
+ bf1[12] = -bf0[12] + bf0[8];
+ bf1[13] = -bf0[13] + bf0[9];
+ bf1[14] = -bf0[14] + bf0[10];
+ bf1[15] = -bf0[15] + bf0[11];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 6
+ stage++;
+ cospi = cospi_arr(cos_bit[stage]);
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
+ bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(-cospi[16], bf0[5], cospi[48], bf0[4], cos_bit[stage]);
+ bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[48], bf0[7], cospi[16], bf0[6], cos_bit[stage]);
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit[stage]);
+ bf1[13] = half_btf(-cospi[16], bf0[13], cospi[48], bf0[12], cos_bit[stage]);
+ bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[48], bf0[15], cospi[16], bf0[14], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 7
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[8];
- bf1[1] = bf0[1] + bf0[9];
- bf1[2] = bf0[2] + bf0[10];
- bf1[3] = bf0[3] + bf0[11];
- bf1[4] = bf0[4] + bf0[12];
- bf1[5] = bf0[5] + bf0[13];
- bf1[6] = bf0[6] + bf0[14];
- bf1[7] = bf0[7] + bf0[15];
- bf1[8] = bf0[0] - bf0[8];
- bf1[9] = bf0[1] - bf0[9];
- bf1[10] = bf0[2] - bf0[10];
- bf1[11] = bf0[3] - bf0[11];
- bf1[12] = bf0[4] - bf0[12];
- bf1[13] = bf0[5] - bf0[13];
- bf1[14] = bf0[6] - bf0[14];
- bf1[15] = bf0[7] - bf0[15];
+ bf1[0] = bf0[0] + bf0[2];
+ bf1[1] = bf0[1] + bf0[3];
+ bf1[2] = -bf0[2] + bf0[0];
+ bf1[3] = -bf0[3] + bf0[1];
+ bf1[4] = bf0[4] + bf0[6];
+ bf1[5] = bf0[5] + bf0[7];
+ bf1[6] = -bf0[6] + bf0[4];
+ bf1[7] = -bf0[7] + bf0[5];
+ bf1[8] = bf0[8] + bf0[10];
+ bf1[9] = bf0[9] + bf0[11];
+ bf1[10] = -bf0[10] + bf0[8];
+ bf1[11] = -bf0[11] + bf0[9];
+ bf1[12] = bf0[12] + bf0[14];
+ bf1[13] = bf0[13] + bf0[15];
+ bf1[14] = -bf0[14] + bf0[12];
+ bf1[15] = -bf0[15] + bf0[13];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 8
@@ -1086,44 +1084,44 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit[stage]);
- bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit[stage]);
- bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit[stage]);
- bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit[stage]);
- bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit[stage]);
- bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit[stage]);
- bf1[9] = half_btf(cospi[30], bf0[8], -cospi[34], bf0[9], cos_bit[stage]);
- bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(cospi[22], bf0[10], -cospi[42], bf0[11], cos_bit[stage]);
- bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(cospi[14], bf0[12], -cospi[50], bf0[13], cos_bit[stage]);
- bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[6], bf0[14], -cospi[58], bf0[15], cos_bit[stage]);
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(-cospi[32], bf0[3], cospi[32], bf0[2], cos_bit[stage]);
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(-cospi[32], bf0[7], cospi[32], bf0[6], cos_bit[stage]);
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit[stage]);
+ bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[10], cos_bit[stage]);
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(-cospi[32], bf0[15], cospi[32], bf0[14], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 9
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[1];
- bf1[1] = bf0[14];
- bf1[2] = bf0[3];
- bf1[3] = bf0[12];
- bf1[4] = bf0[5];
- bf1[5] = bf0[10];
- bf1[6] = bf0[7];
- bf1[7] = bf0[8];
- bf1[8] = bf0[9];
- bf1[9] = bf0[6];
- bf1[10] = bf0[11];
- bf1[11] = bf0[4];
- bf1[12] = bf0[13];
- bf1[13] = bf0[2];
- bf1[14] = bf0[15];
- bf1[15] = bf0[0];
+ bf1[0] = bf0[0];
+ bf1[1] = -bf0[8];
+ bf1[2] = bf0[12];
+ bf1[3] = -bf0[4];
+ bf1[4] = bf0[6];
+ bf1[5] = -bf0[14];
+ bf1[6] = bf0[10];
+ bf1[7] = -bf0[2];
+ bf1[8] = bf0[3];
+ bf1[9] = -bf0[11];
+ bf1[10] = bf0[15];
+ bf1[11] = -bf0[7];
+ bf1[12] = bf0[5];
+ bf1[13] = -bf0[13];
+ bf1[14] = bf0[9];
+ bf1[15] = -bf0[1];
range_check(stage, input, bf1, size, stage_range[stage]);
}
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index 95297e5..b21c7db 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -448,183 +448,305 @@
static void iadst8x8_sse4_1(__m128i *in, __m128i *out, int bit) {
const int32_t *cospi = cospi_arr(bit);
- const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
- const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
- const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
- const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
- const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
- const __m128i cospim4 = _mm_set1_epi32(-cospi[4]);
const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
- const __m128i cospim20 = _mm_set1_epi32(-cospi[20]);
const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
- const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
- const __m128i cospim36 = _mm_set1_epi32(-cospi[36]);
+ const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
- const __m128i cospim52 = _mm_set1_epi32(-cospi[52]);
const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+ const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+ const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
- const __m128i zero = _mm_setzero_si128();
- __m128i u0, u1, u2, u3, u4, u5, u6, u7;
- __m128i v0, v1, v2, v3, v4, v5, v6, v7;
- __m128i x, y;
- int col;
+ const __m128i kZero = _mm_setzero_si128();
+ __m128i u[8], v[8], x;
- // Note:
- // Even column: 0, 2, ..., 14
- // Odd column: 1, 3, ..., 15
- // one even column plus one odd column constructs one row (8 coeffs)
- // total we have 8 rows (8x8).
- for (col = 0; col < 2; ++col) {
- // stage 0
- // stage 1
- u0 = in[2 * 0 + col];
- u1 = _mm_sub_epi32(zero, in[2 * 7 + col]);
- u2 = _mm_sub_epi32(zero, in[2 * 3 + col]);
- u3 = in[2 * 4 + col];
- u4 = _mm_sub_epi32(zero, in[2 * 1 + col]);
- u5 = in[2 * 6 + col];
- u6 = in[2 * 2 + col];
- u7 = _mm_sub_epi32(zero, in[2 * 5 + col]);
+ // Even 8 points: 0, 2, ..., 14
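+ // One even-indexed register plus one odd-indexed register construct one
+ // row of 8 coefficients; this pass transforms the even-indexed registers.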
+ // stage 0
+ // stage 1
+ // stage 2
+ // (1)
+ u[0] = _mm_mullo_epi32(in[14], cospi4);
+ x = _mm_mullo_epi32(in[0], cospi60);
+ u[0] = _mm_add_epi32(u[0], x);
+ u[0] = _mm_add_epi32(u[0], rnding);
+ u[0] = _mm_srai_epi32(u[0], bit);
- // stage 2
- v0 = u0;
- v1 = u1;
+ u[1] = _mm_mullo_epi32(in[14], cospi60);
+ x = _mm_mullo_epi32(in[0], cospi4);
+ u[1] = _mm_sub_epi32(u[1], x);
+ u[1] = _mm_add_epi32(u[1], rnding);
+ u[1] = _mm_srai_epi32(u[1], bit);
- x = _mm_mullo_epi32(u2, cospi32);
- y = _mm_mullo_epi32(u3, cospi32);
- v2 = _mm_add_epi32(x, y);
- v2 = _mm_add_epi32(v2, rnding);
- v2 = _mm_srai_epi32(v2, bit);
+ // (2)
+ u[2] = _mm_mullo_epi32(in[10], cospi20);
+ x = _mm_mullo_epi32(in[4], cospi44);
+ u[2] = _mm_add_epi32(u[2], x);
+ u[2] = _mm_add_epi32(u[2], rnding);
+ u[2] = _mm_srai_epi32(u[2], bit);
- v3 = _mm_sub_epi32(x, y);
- v3 = _mm_add_epi32(v3, rnding);
- v3 = _mm_srai_epi32(v3, bit);
+ u[3] = _mm_mullo_epi32(in[10], cospi44);
+ x = _mm_mullo_epi32(in[4], cospi20);
+ u[3] = _mm_sub_epi32(u[3], x);
+ u[3] = _mm_add_epi32(u[3], rnding);
+ u[3] = _mm_srai_epi32(u[3], bit);
- v4 = u4;
- v5 = u5;
+ // (3)
+ u[4] = _mm_mullo_epi32(in[6], cospi36);
+ x = _mm_mullo_epi32(in[8], cospi28);
+ u[4] = _mm_add_epi32(u[4], x);
+ u[4] = _mm_add_epi32(u[4], rnding);
+ u[4] = _mm_srai_epi32(u[4], bit);
- x = _mm_mullo_epi32(u6, cospi32);
- y = _mm_mullo_epi32(u7, cospi32);
- v6 = _mm_add_epi32(x, y);
- v6 = _mm_add_epi32(v6, rnding);
- v6 = _mm_srai_epi32(v6, bit);
+ u[5] = _mm_mullo_epi32(in[6], cospi28);
+ x = _mm_mullo_epi32(in[8], cospi36);
+ u[5] = _mm_sub_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
- v7 = _mm_sub_epi32(x, y);
- v7 = _mm_add_epi32(v7, rnding);
- v7 = _mm_srai_epi32(v7, bit);
+ // (4)
+ u[6] = _mm_mullo_epi32(in[2], cospi52);
+ x = _mm_mullo_epi32(in[12], cospi12);
+ u[6] = _mm_add_epi32(u[6], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
- // stage 3
- u0 = _mm_add_epi32(v0, v2);
- u1 = _mm_add_epi32(v1, v3);
- u2 = _mm_sub_epi32(v0, v2);
- u3 = _mm_sub_epi32(v1, v3);
- u4 = _mm_add_epi32(v4, v6);
- u5 = _mm_add_epi32(v5, v7);
- u6 = _mm_sub_epi32(v4, v6);
- u7 = _mm_sub_epi32(v5, v7);
+ u[7] = _mm_mullo_epi32(in[2], cospi12);
+ x = _mm_mullo_epi32(in[12], cospi52);
+ u[7] = _mm_sub_epi32(u[7], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
- // stage 4
- v0 = u0;
- v1 = u1;
- v2 = u2;
- v3 = u3;
+ // stage 3
+ v[0] = _mm_add_epi32(u[0], u[4]);
+ v[4] = _mm_sub_epi32(u[0], u[4]);
+ v[1] = _mm_add_epi32(u[1], u[5]);
+ v[5] = _mm_sub_epi32(u[1], u[5]);
+ v[2] = _mm_add_epi32(u[2], u[6]);
+ v[6] = _mm_sub_epi32(u[2], u[6]);
+ v[3] = _mm_add_epi32(u[3], u[7]);
+ v[7] = _mm_sub_epi32(u[3], u[7]);
- x = _mm_mullo_epi32(u4, cospi16);
- y = _mm_mullo_epi32(u5, cospi48);
- v4 = _mm_add_epi32(x, y);
- v4 = _mm_add_epi32(v4, rnding);
- v4 = _mm_srai_epi32(v4, bit);
+ // stage 4
+ u[0] = v[0];
+ u[1] = v[1];
+ u[2] = v[2];
+ u[3] = v[3];
- x = _mm_mullo_epi32(u4, cospi48);
- y = _mm_mullo_epi32(u5, cospim16);
- v5 = _mm_add_epi32(x, y);
- v5 = _mm_add_epi32(v5, rnding);
- v5 = _mm_srai_epi32(v5, bit);
+ u[4] = _mm_mullo_epi32(v[4], cospi16);
+ x = _mm_mullo_epi32(v[5], cospi48);
+ u[4] = _mm_add_epi32(u[4], x);
+ u[4] = _mm_add_epi32(u[4], rnding);
+ u[4] = _mm_srai_epi32(u[4], bit);
- x = _mm_mullo_epi32(u6, cospim48);
- y = _mm_mullo_epi32(u7, cospi16);
- v6 = _mm_add_epi32(x, y);
- v6 = _mm_add_epi32(v6, rnding);
- v6 = _mm_srai_epi32(v6, bit);
+ u[5] = _mm_mullo_epi32(v[4], cospi48);
+ x = _mm_mullo_epi32(v[5], cospi16);
+ u[5] = _mm_sub_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
- x = _mm_mullo_epi32(u6, cospi16);
- y = _mm_mullo_epi32(u7, cospi48);
- v7 = _mm_add_epi32(x, y);
- v7 = _mm_add_epi32(v7, rnding);
- v7 = _mm_srai_epi32(v7, bit);
+ u[6] = _mm_mullo_epi32(v[6], cospim48);
+ x = _mm_mullo_epi32(v[7], cospi16);
+ u[6] = _mm_add_epi32(u[6], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
- // stage 5
- u0 = _mm_add_epi32(v0, v4);
- u1 = _mm_add_epi32(v1, v5);
- u2 = _mm_add_epi32(v2, v6);
- u3 = _mm_add_epi32(v3, v7);
- u4 = _mm_sub_epi32(v0, v4);
- u5 = _mm_sub_epi32(v1, v5);
- u6 = _mm_sub_epi32(v2, v6);
- u7 = _mm_sub_epi32(v3, v7);
+ u[7] = _mm_mullo_epi32(v[6], cospi16);
+ x = _mm_mullo_epi32(v[7], cospim48);
+ u[7] = _mm_sub_epi32(u[7], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
- // stage 6
- x = _mm_mullo_epi32(u0, cospi4);
- y = _mm_mullo_epi32(u1, cospi60);
- v0 = _mm_add_epi32(x, y);
- v0 = _mm_add_epi32(v0, rnding);
- v0 = _mm_srai_epi32(v0, bit);
+ // stage 5
+ v[0] = _mm_add_epi32(u[0], u[2]);
+ v[2] = _mm_sub_epi32(u[0], u[2]);
+ v[1] = _mm_add_epi32(u[1], u[3]);
+ v[3] = _mm_sub_epi32(u[1], u[3]);
+ v[4] = _mm_add_epi32(u[4], u[6]);
+ v[6] = _mm_sub_epi32(u[4], u[6]);
+ v[5] = _mm_add_epi32(u[5], u[7]);
+ v[7] = _mm_sub_epi32(u[5], u[7]);
- x = _mm_mullo_epi32(u0, cospi60);
- y = _mm_mullo_epi32(u1, cospim4);
- v1 = _mm_add_epi32(x, y);
- v1 = _mm_add_epi32(v1, rnding);
- v1 = _mm_srai_epi32(v1, bit);
+ // stage 6
+ u[0] = v[0];
+ u[1] = v[1];
+ u[4] = v[4];
+ u[5] = v[5];
- x = _mm_mullo_epi32(u2, cospi20);
- y = _mm_mullo_epi32(u3, cospi44);
- v2 = _mm_add_epi32(x, y);
- v2 = _mm_add_epi32(v2, rnding);
- v2 = _mm_srai_epi32(v2, bit);
+ v[0] = _mm_mullo_epi32(v[2], cospi32);
+ x = _mm_mullo_epi32(v[3], cospi32);
+ u[2] = _mm_add_epi32(v[0], x);
+ u[2] = _mm_add_epi32(u[2], rnding);
+ u[2] = _mm_srai_epi32(u[2], bit);
- x = _mm_mullo_epi32(u2, cospi44);
- y = _mm_mullo_epi32(u3, cospim20);
- v3 = _mm_add_epi32(x, y);
- v3 = _mm_add_epi32(v3, rnding);
- v3 = _mm_srai_epi32(v3, bit);
+ u[3] = _mm_sub_epi32(v[0], x);
+ u[3] = _mm_add_epi32(u[3], rnding);
+ u[3] = _mm_srai_epi32(u[3], bit);
- x = _mm_mullo_epi32(u4, cospi36);
- y = _mm_mullo_epi32(u5, cospi28);
- v4 = _mm_add_epi32(x, y);
- v4 = _mm_add_epi32(v4, rnding);
- v4 = _mm_srai_epi32(v4, bit);
+ v[0] = _mm_mullo_epi32(v[6], cospi32);
+ x = _mm_mullo_epi32(v[7], cospi32);
+ u[6] = _mm_add_epi32(v[0], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
- x = _mm_mullo_epi32(u4, cospi28);
- y = _mm_mullo_epi32(u5, cospim36);
- v5 = _mm_add_epi32(x, y);
- v5 = _mm_add_epi32(v5, rnding);
- v5 = _mm_srai_epi32(v5, bit);
+ u[7] = _mm_sub_epi32(v[0], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
- x = _mm_mullo_epi32(u6, cospi52);
- y = _mm_mullo_epi32(u7, cospi12);
- v6 = _mm_add_epi32(x, y);
- v6 = _mm_add_epi32(v6, rnding);
- v6 = _mm_srai_epi32(v6, bit);
+ // stage 7
+ out[0] = u[0];
+ out[2] = _mm_sub_epi32(kZero, u[4]);
+ out[4] = u[6];
+ out[6] = _mm_sub_epi32(kZero, u[2]);
+ out[8] = u[3];
+ out[10] = _mm_sub_epi32(kZero, u[7]);
+ out[12] = u[5];
+ out[14] = _mm_sub_epi32(kZero, u[1]);
- x = _mm_mullo_epi32(u6, cospi12);
- y = _mm_mullo_epi32(u7, cospim52);
- v7 = _mm_add_epi32(x, y);
- v7 = _mm_add_epi32(v7, rnding);
- v7 = _mm_srai_epi32(v7, bit);
+ // Odd 8 points: 1, 3, ..., 15
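+ // Same dataflow as the even half above, applied to the odd registers.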
+ // stage 0
+ // stage 1
+ // stage 2
+ // (1)
+ u[0] = _mm_mullo_epi32(in[15], cospi4);
+ x = _mm_mullo_epi32(in[1], cospi60);
+ u[0] = _mm_add_epi32(u[0], x);
+ u[0] = _mm_add_epi32(u[0], rnding);
+ u[0] = _mm_srai_epi32(u[0], bit);
- // stage 7
- out[2 * 0 + col] = v1;
- out[2 * 1 + col] = v6;
- out[2 * 2 + col] = v3;
- out[2 * 3 + col] = v4;
- out[2 * 4 + col] = v5;
- out[2 * 5 + col] = v2;
- out[2 * 6 + col] = v7;
- out[2 * 7 + col] = v0;
- }
+ u[1] = _mm_mullo_epi32(in[15], cospi60);
+ x = _mm_mullo_epi32(in[1], cospi4);
+ u[1] = _mm_sub_epi32(u[1], x);
+ u[1] = _mm_add_epi32(u[1], rnding);
+ u[1] = _mm_srai_epi32(u[1], bit);
+
+ // (2)
+ u[2] = _mm_mullo_epi32(in[11], cospi20);
+ x = _mm_mullo_epi32(in[5], cospi44);
+ u[2] = _mm_add_epi32(u[2], x);
+ u[2] = _mm_add_epi32(u[2], rnding);
+ u[2] = _mm_srai_epi32(u[2], bit);
+
+ u[3] = _mm_mullo_epi32(in[11], cospi44);
+ x = _mm_mullo_epi32(in[5], cospi20);
+ u[3] = _mm_sub_epi32(u[3], x);
+ u[3] = _mm_add_epi32(u[3], rnding);
+ u[3] = _mm_srai_epi32(u[3], bit);
+
+ // (3)
+ u[4] = _mm_mullo_epi32(in[7], cospi36);
+ x = _mm_mullo_epi32(in[9], cospi28);
+ u[4] = _mm_add_epi32(u[4], x);
+ u[4] = _mm_add_epi32(u[4], rnding);
+ u[4] = _mm_srai_epi32(u[4], bit);
+
+ u[5] = _mm_mullo_epi32(in[7], cospi28);
+ x = _mm_mullo_epi32(in[9], cospi36);
+ u[5] = _mm_sub_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
+
+ // (4)
+ u[6] = _mm_mullo_epi32(in[3], cospi52);
+ x = _mm_mullo_epi32(in[13], cospi12);
+ u[6] = _mm_add_epi32(u[6], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
+
+ u[7] = _mm_mullo_epi32(in[3], cospi12);
+ x = _mm_mullo_epi32(in[13], cospi52);
+ u[7] = _mm_sub_epi32(u[7], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
+
+ // stage 3
+ v[0] = _mm_add_epi32(u[0], u[4]);
+ v[4] = _mm_sub_epi32(u[0], u[4]);
+ v[1] = _mm_add_epi32(u[1], u[5]);
+ v[5] = _mm_sub_epi32(u[1], u[5]);
+ v[2] = _mm_add_epi32(u[2], u[6]);
+ v[6] = _mm_sub_epi32(u[2], u[6]);
+ v[3] = _mm_add_epi32(u[3], u[7]);
+ v[7] = _mm_sub_epi32(u[3], u[7]);
+
+ // stage 4
+ u[0] = v[0];
+ u[1] = v[1];
+ u[2] = v[2];
+ u[3] = v[3];
+
+ u[4] = _mm_mullo_epi32(v[4], cospi16);
+ x = _mm_mullo_epi32(v[5], cospi48);
+ u[4] = _mm_add_epi32(u[4], x);
+ u[4] = _mm_add_epi32(u[4], rnding);
+ u[4] = _mm_srai_epi32(u[4], bit);
+
+ u[5] = _mm_mullo_epi32(v[4], cospi48);
+ x = _mm_mullo_epi32(v[5], cospi16);
+ u[5] = _mm_sub_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
+
+ u[6] = _mm_mullo_epi32(v[6], cospim48);
+ x = _mm_mullo_epi32(v[7], cospi16);
+ u[6] = _mm_add_epi32(u[6], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
+
+ u[7] = _mm_mullo_epi32(v[6], cospi16);
+ x = _mm_mullo_epi32(v[7], cospim48);
+ u[7] = _mm_sub_epi32(u[7], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
+
+ // stage 5
+ v[0] = _mm_add_epi32(u[0], u[2]);
+ v[2] = _mm_sub_epi32(u[0], u[2]);
+ v[1] = _mm_add_epi32(u[1], u[3]);
+ v[3] = _mm_sub_epi32(u[1], u[3]);
+ v[4] = _mm_add_epi32(u[4], u[6]);
+ v[6] = _mm_sub_epi32(u[4], u[6]);
+ v[5] = _mm_add_epi32(u[5], u[7]);
+ v[7] = _mm_sub_epi32(u[5], u[7]);
+
+ // stage 6
+ u[0] = v[0];
+ u[1] = v[1];
+ u[4] = v[4];
+ u[5] = v[5];
+
+ v[0] = _mm_mullo_epi32(v[2], cospi32);
+ x = _mm_mullo_epi32(v[3], cospi32);
+ u[2] = _mm_add_epi32(v[0], x);
+ u[2] = _mm_add_epi32(u[2], rnding);
+ u[2] = _mm_srai_epi32(u[2], bit);
+
+ u[3] = _mm_sub_epi32(v[0], x);
+ u[3] = _mm_add_epi32(u[3], rnding);
+ u[3] = _mm_srai_epi32(u[3], bit);
+
+ v[0] = _mm_mullo_epi32(v[6], cospi32);
+ x = _mm_mullo_epi32(v[7], cospi32);
+ u[6] = _mm_add_epi32(v[0], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
+
+ u[7] = _mm_sub_epi32(v[0], x);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
+
+ // stage 7
+ out[1] = u[0];
+ out[3] = _mm_sub_epi32(kZero, u[4]);
+ out[5] = u[6];
+ out[7] = _mm_sub_epi32(kZero, u[2]);
+ out[9] = u[3];
+ out[11] = _mm_sub_epi32(kZero, u[7]);
+ out[13] = u[5];
+ out[15] = _mm_sub_epi32(kZero, u[1]);
}
static void round_shift_8x8(__m128i *in, int shift) {
@@ -1060,247 +1182,379 @@
static void iadst16x16_sse4_1(__m128i *in, __m128i *out, int bit) {
const int32_t *cospi = cospi_arr(bit);
- const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
- const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
- const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
- const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
- const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
- const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
- const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
- const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
- const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
- const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
- const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
- const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
- const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
- const __m128i cospim2 = _mm_set1_epi32(-cospi[2]);
const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
- const __m128i cospim10 = _mm_set1_epi32(-cospi[10]);
const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
- const __m128i cospim18 = _mm_set1_epi32(-cospi[18]);
const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
- const __m128i cospim26 = _mm_set1_epi32(-cospi[26]);
const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
- const __m128i cospim34 = _mm_set1_epi32(-cospi[34]);
const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
- const __m128i cospim42 = _mm_set1_epi32(-cospi[42]);
const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
- const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
- const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
+ const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+ const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+ const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+ const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+ const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
+ const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
+ const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+ const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
- const __m128i zero = _mm_setzero_si128();
-
__m128i u[16], v[16], x, y;
+ const int col_num = 4;
int col;
- for (col = 0; col < 4; ++col) {
+ // Process the 16 columns four at a time; col selects a four-lane group.
+ for (col = 0; col < col_num; ++col) {
// stage 0
// stage 1
- u[0] = in[0 * 4 + col];
- u[1] = _mm_sub_epi32(zero, in[15 * 4 + col]);
- u[2] = _mm_sub_epi32(zero, in[7 * 4 + col]);
- u[3] = in[8 * 4 + col];
- u[4] = _mm_sub_epi32(zero, in[3 * 4 + col]);
- u[5] = in[12 * 4 + col];
- u[6] = in[4 * 4 + col];
- u[7] = _mm_sub_epi32(zero, in[11 * 4 + col]);
- u[8] = _mm_sub_epi32(zero, in[1 * 4 + col]);
- u[9] = in[14 * 4 + col];
- u[10] = in[6 * 4 + col];
- u[11] = _mm_sub_epi32(zero, in[9 * 4 + col]);
- u[12] = in[2 * 4 + col];
- u[13] = _mm_sub_epi32(zero, in[13 * 4 + col]);
- u[14] = _mm_sub_epi32(zero, in[5 * 4 + col]);
- u[15] = in[10 * 4 + col];
-
// stage 2
- v[0] = u[0];
- v[1] = u[1];
+ v[0] = _mm_mullo_epi32(in[15 * col_num + col], cospi2);
+ x = _mm_mullo_epi32(in[0 * col_num + col], cospi62);
+ v[0] = _mm_add_epi32(v[0], x);
+ v[0] = _mm_add_epi32(v[0], rnding);
+ v[0] = _mm_srai_epi32(v[0], bit);
- x = _mm_mullo_epi32(u[2], cospi32);
- y = _mm_mullo_epi32(u[3], cospi32);
- v[2] = _mm_add_epi32(x, y);
+ v[1] = _mm_mullo_epi32(in[15 * col_num + col], cospi62);
+ x = _mm_mullo_epi32(in[0 * col_num + col], cospi2);
+ v[1] = _mm_sub_epi32(v[1], x);
+ v[1] = _mm_add_epi32(v[1], rnding);
+ v[1] = _mm_srai_epi32(v[1], bit);
+
+ v[2] = _mm_mullo_epi32(in[13 * col_num + col], cospi10);
+ x = _mm_mullo_epi32(in[2 * col_num + col], cospi54);
+ v[2] = _mm_add_epi32(v[2], x);
v[2] = _mm_add_epi32(v[2], rnding);
v[2] = _mm_srai_epi32(v[2], bit);
- v[3] = _mm_sub_epi32(x, y);
+ v[3] = _mm_mullo_epi32(in[13 * col_num + col], cospi54);
+ x = _mm_mullo_epi32(in[2 * col_num + col], cospi10);
+ v[3] = _mm_sub_epi32(v[3], x);
v[3] = _mm_add_epi32(v[3], rnding);
v[3] = _mm_srai_epi32(v[3], bit);
- v[4] = u[4];
- v[5] = u[5];
+ v[4] = _mm_mullo_epi32(in[11 * col_num + col], cospi18);
+ x = _mm_mullo_epi32(in[4 * col_num + col], cospi46);
+ v[4] = _mm_add_epi32(v[4], x);
+ v[4] = _mm_add_epi32(v[4], rnding);
+ v[4] = _mm_srai_epi32(v[4], bit);
- x = _mm_mullo_epi32(u[6], cospi32);
- y = _mm_mullo_epi32(u[7], cospi32);
- v[6] = _mm_add_epi32(x, y);
+ v[5] = _mm_mullo_epi32(in[11 * col_num + col], cospi46);
+ x = _mm_mullo_epi32(in[4 * col_num + col], cospi18);
+ v[5] = _mm_sub_epi32(v[5], x);
+ v[5] = _mm_add_epi32(v[5], rnding);
+ v[5] = _mm_srai_epi32(v[5], bit);
+
+ v[6] = _mm_mullo_epi32(in[9 * col_num + col], cospi26);
+ x = _mm_mullo_epi32(in[6 * col_num + col], cospi38);
+ v[6] = _mm_add_epi32(v[6], x);
v[6] = _mm_add_epi32(v[6], rnding);
v[6] = _mm_srai_epi32(v[6], bit);
- v[7] = _mm_sub_epi32(x, y);
+ v[7] = _mm_mullo_epi32(in[9 * col_num + col], cospi38);
+ x = _mm_mullo_epi32(in[6 * col_num + col], cospi26);
+ v[7] = _mm_sub_epi32(v[7], x);
v[7] = _mm_add_epi32(v[7], rnding);
v[7] = _mm_srai_epi32(v[7], bit);
- v[8] = u[8];
- v[9] = u[9];
+ v[8] = _mm_mullo_epi32(in[7 * col_num + col], cospi34);
+ x = _mm_mullo_epi32(in[8 * col_num + col], cospi30);
+ v[8] = _mm_add_epi32(v[8], x);
+ v[8] = _mm_add_epi32(v[8], rnding);
+ v[8] = _mm_srai_epi32(v[8], bit);
- x = _mm_mullo_epi32(u[10], cospi32);
- y = _mm_mullo_epi32(u[11], cospi32);
- v[10] = _mm_add_epi32(x, y);
+ v[9] = _mm_mullo_epi32(in[7 * col_num + col], cospi30);
+ x = _mm_mullo_epi32(in[8 * col_num + col], cospi34);
+ v[9] = _mm_sub_epi32(v[9], x);
+ v[9] = _mm_add_epi32(v[9], rnding);
+ v[9] = _mm_srai_epi32(v[9], bit);
+
+ v[10] = _mm_mullo_epi32(in[5 * col_num + col], cospi42);
+ x = _mm_mullo_epi32(in[10 * col_num + col], cospi22);
+ v[10] = _mm_add_epi32(v[10], x);
v[10] = _mm_add_epi32(v[10], rnding);
v[10] = _mm_srai_epi32(v[10], bit);
- v[11] = _mm_sub_epi32(x, y);
+ v[11] = _mm_mullo_epi32(in[5 * col_num + col], cospi22);
+ x = _mm_mullo_epi32(in[10 * col_num + col], cospi42);
+ v[11] = _mm_sub_epi32(v[11], x);
v[11] = _mm_add_epi32(v[11], rnding);
v[11] = _mm_srai_epi32(v[11], bit);
- v[12] = u[12];
- v[13] = u[13];
+ v[12] = _mm_mullo_epi32(in[3 * col_num + col], cospi50);
+ x = _mm_mullo_epi32(in[12 * col_num + col], cospi14);
+ v[12] = _mm_add_epi32(v[12], x);
+ v[12] = _mm_add_epi32(v[12], rnding);
+ v[12] = _mm_srai_epi32(v[12], bit);
- x = _mm_mullo_epi32(u[14], cospi32);
- y = _mm_mullo_epi32(u[15], cospi32);
- v[14] = _mm_add_epi32(x, y);
+ v[13] = _mm_mullo_epi32(in[3 * col_num + col], cospi14);
+ x = _mm_mullo_epi32(in[12 * col_num + col], cospi50);
+ v[13] = _mm_sub_epi32(v[13], x);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[14] = _mm_mullo_epi32(in[1 * col_num + col], cospi58);
+ x = _mm_mullo_epi32(in[14 * col_num + col], cospi6);
+ v[14] = _mm_add_epi32(v[14], x);
v[14] = _mm_add_epi32(v[14], rnding);
v[14] = _mm_srai_epi32(v[14], bit);
- v[15] = _mm_sub_epi32(x, y);
+ v[15] = _mm_mullo_epi32(in[1 * col_num + col], cospi6);
+ x = _mm_mullo_epi32(in[14 * col_num + col], cospi58);
+ v[15] = _mm_sub_epi32(v[15], x);
v[15] = _mm_add_epi32(v[15], rnding);
v[15] = _mm_srai_epi32(v[15], bit);
// stage 3
- u[0] = _mm_add_epi32(v[0], v[2]);
- u[1] = _mm_add_epi32(v[1], v[3]);
- u[2] = _mm_sub_epi32(v[0], v[2]);
- u[3] = _mm_sub_epi32(v[1], v[3]);
- u[4] = _mm_add_epi32(v[4], v[6]);
- u[5] = _mm_add_epi32(v[5], v[7]);
- u[6] = _mm_sub_epi32(v[4], v[6]);
- u[7] = _mm_sub_epi32(v[5], v[7]);
- u[8] = _mm_add_epi32(v[8], v[10]);
- u[9] = _mm_add_epi32(v[9], v[11]);
- u[10] = _mm_sub_epi32(v[8], v[10]);
- u[11] = _mm_sub_epi32(v[9], v[11]);
- u[12] = _mm_add_epi32(v[12], v[14]);
- u[13] = _mm_add_epi32(v[13], v[15]);
- u[14] = _mm_sub_epi32(v[12], v[14]);
- u[15] = _mm_sub_epi32(v[13], v[15]);
+ u[0] = _mm_add_epi32(v[0], v[8]);
+ u[8] = _mm_sub_epi32(v[0], v[8]);
+ u[1] = _mm_add_epi32(v[1], v[9]);
+ u[9] = _mm_sub_epi32(v[1], v[9]);
+ u[2] = _mm_add_epi32(v[2], v[10]);
+ u[10] = _mm_sub_epi32(v[2], v[10]);
+ u[3] = _mm_add_epi32(v[3], v[11]);
+ u[11] = _mm_sub_epi32(v[3], v[11]);
+ u[4] = _mm_add_epi32(v[4], v[12]);
+ u[12] = _mm_sub_epi32(v[4], v[12]);
+ u[5] = _mm_add_epi32(v[5], v[13]);
+ u[13] = _mm_sub_epi32(v[5], v[13]);
+ u[6] = _mm_add_epi32(v[6], v[14]);
+ u[14] = _mm_sub_epi32(v[6], v[14]);
+ u[7] = _mm_add_epi32(v[7], v[15]);
+ u[15] = _mm_sub_epi32(v[7], v[15]);
// stage 4
v[0] = u[0];
v[1] = u[1];
v[2] = u[2];
v[3] = u[3];
- v[4] = half_btf_sse4_1(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
- v[5] = half_btf_sse4_1(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
- v[6] = half_btf_sse4_1(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
- v[7] = half_btf_sse4_1(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
- v[8] = u[8];
- v[9] = u[9];
- v[10] = u[10];
- v[11] = u[11];
- v[12] = half_btf_sse4_1(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
- v[13] = half_btf_sse4_1(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
- v[14] = half_btf_sse4_1(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
- v[15] = half_btf_sse4_1(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);
-
- // stage 5
- u[0] = _mm_add_epi32(v[0], v[4]);
- u[1] = _mm_add_epi32(v[1], v[5]);
- u[2] = _mm_add_epi32(v[2], v[6]);
- u[3] = _mm_add_epi32(v[3], v[7]);
- u[4] = _mm_sub_epi32(v[0], v[4]);
- u[5] = _mm_sub_epi32(v[1], v[5]);
- u[6] = _mm_sub_epi32(v[2], v[6]);
- u[7] = _mm_sub_epi32(v[3], v[7]);
- u[8] = _mm_add_epi32(v[8], v[12]);
- u[9] = _mm_add_epi32(v[9], v[13]);
- u[10] = _mm_add_epi32(v[10], v[14]);
- u[11] = _mm_add_epi32(v[11], v[15]);
- u[12] = _mm_sub_epi32(v[8], v[12]);
- u[13] = _mm_sub_epi32(v[9], v[13]);
- u[14] = _mm_sub_epi32(v[10], v[14]);
- u[15] = _mm_sub_epi32(v[11], v[15]);
-
- // stage 6
- v[0] = u[0];
- v[1] = u[1];
- v[2] = u[2];
- v[3] = u[3];
v[4] = u[4];
v[5] = u[5];
v[6] = u[6];
v[7] = u[7];
- v[8] = half_btf_sse4_1(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
- v[9] = half_btf_sse4_1(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
- v[10] = half_btf_sse4_1(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
- v[11] = half_btf_sse4_1(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
- v[12] = half_btf_sse4_1(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
- v[13] = half_btf_sse4_1(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
- v[14] = half_btf_sse4_1(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
- v[15] = half_btf_sse4_1(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);
+
+ v[8] = _mm_mullo_epi32(u[8], cospi8);
+ x = _mm_mullo_epi32(u[9], cospi56);
+ v[8] = _mm_add_epi32(v[8], x);
+ v[8] = _mm_add_epi32(v[8], rnding);
+ v[8] = _mm_srai_epi32(v[8], bit);
+
+ v[9] = _mm_mullo_epi32(u[8], cospi56);
+ x = _mm_mullo_epi32(u[9], cospi8);
+ v[9] = _mm_sub_epi32(v[9], x);
+ v[9] = _mm_add_epi32(v[9], rnding);
+ v[9] = _mm_srai_epi32(v[9], bit);
+
+ v[10] = _mm_mullo_epi32(u[10], cospi40);
+ x = _mm_mullo_epi32(u[11], cospi24);
+ v[10] = _mm_add_epi32(v[10], x);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[11] = _mm_mullo_epi32(u[10], cospi24);
+ x = _mm_mullo_epi32(u[11], cospi40);
+ v[11] = _mm_sub_epi32(v[11], x);
+ v[11] = _mm_add_epi32(v[11], rnding);
+ v[11] = _mm_srai_epi32(v[11], bit);
+
+ v[12] = _mm_mullo_epi32(u[12], cospim56);
+ x = _mm_mullo_epi32(u[13], cospi8);
+ v[12] = _mm_add_epi32(v[12], x);
+ v[12] = _mm_add_epi32(v[12], rnding);
+ v[12] = _mm_srai_epi32(v[12], bit);
+
+ v[13] = _mm_mullo_epi32(u[12], cospi8);
+ x = _mm_mullo_epi32(u[13], cospim56);
+ v[13] = _mm_sub_epi32(v[13], x);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[14] = _mm_mullo_epi32(u[14], cospim24);
+ x = _mm_mullo_epi32(u[15], cospi40);
+ v[14] = _mm_add_epi32(v[14], x);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[15] = _mm_mullo_epi32(u[14], cospi40);
+ x = _mm_mullo_epi32(u[15], cospim24);
+ v[15] = _mm_sub_epi32(v[15], x);
+ v[15] = _mm_add_epi32(v[15], rnding);
+ v[15] = _mm_srai_epi32(v[15], bit);
+
+ // stage 5
+ u[0] = _mm_add_epi32(v[0], v[4]);
+ u[4] = _mm_sub_epi32(v[0], v[4]);
+ u[1] = _mm_add_epi32(v[1], v[5]);
+ u[5] = _mm_sub_epi32(v[1], v[5]);
+ u[2] = _mm_add_epi32(v[2], v[6]);
+ u[6] = _mm_sub_epi32(v[2], v[6]);
+ u[3] = _mm_add_epi32(v[3], v[7]);
+ u[7] = _mm_sub_epi32(v[3], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[12]);
+ u[12] = _mm_sub_epi32(v[8], v[12]);
+ u[9] = _mm_add_epi32(v[9], v[13]);
+ u[13] = _mm_sub_epi32(v[9], v[13]);
+ u[10] = _mm_add_epi32(v[10], v[14]);
+ u[14] = _mm_sub_epi32(v[10], v[14]);
+ u[11] = _mm_add_epi32(v[11], v[15]);
+ u[15] = _mm_sub_epi32(v[11], v[15]);
+
+ // stage 6
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+
+ v[4] = _mm_mullo_epi32(u[4], cospi16);
+ x = _mm_mullo_epi32(u[5], cospi48);
+ v[4] = _mm_add_epi32(v[4], x);
+ v[4] = _mm_add_epi32(v[4], rnding);
+ v[4] = _mm_srai_epi32(v[4], bit);
+
+ v[5] = _mm_mullo_epi32(u[4], cospi48);
+ x = _mm_mullo_epi32(u[5], cospi16);
+ v[5] = _mm_sub_epi32(v[5], x);
+ v[5] = _mm_add_epi32(v[5], rnding);
+ v[5] = _mm_srai_epi32(v[5], bit);
+
+ v[6] = _mm_mullo_epi32(u[6], cospim48);
+ x = _mm_mullo_epi32(u[7], cospi16);
+ v[6] = _mm_add_epi32(v[6], x);
+ v[6] = _mm_add_epi32(v[6], rnding);
+ v[6] = _mm_srai_epi32(v[6], bit);
+
+ v[7] = _mm_mullo_epi32(u[6], cospi16);
+ x = _mm_mullo_epi32(u[7], cospim48);
+ v[7] = _mm_sub_epi32(v[7], x);
+ v[7] = _mm_add_epi32(v[7], rnding);
+ v[7] = _mm_srai_epi32(v[7], bit);
+
+ v[8] = u[8];
+ v[9] = u[9];
+ v[10] = u[10];
+ v[11] = u[11];
+
+ v[12] = _mm_mullo_epi32(u[12], cospi16);
+ x = _mm_mullo_epi32(u[13], cospi48);
+ v[12] = _mm_add_epi32(v[12], x);
+ v[12] = _mm_add_epi32(v[12], rnding);
+ v[12] = _mm_srai_epi32(v[12], bit);
+
+ v[13] = _mm_mullo_epi32(u[12], cospi48);
+ x = _mm_mullo_epi32(u[13], cospi16);
+ v[13] = _mm_sub_epi32(v[13], x);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[14] = _mm_mullo_epi32(u[14], cospim48);
+ x = _mm_mullo_epi32(u[15], cospi16);
+ v[14] = _mm_add_epi32(v[14], x);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[15] = _mm_mullo_epi32(u[14], cospi16);
+ x = _mm_mullo_epi32(u[15], cospim48);
+ v[15] = _mm_sub_epi32(v[15], x);
+ v[15] = _mm_add_epi32(v[15], rnding);
+ v[15] = _mm_srai_epi32(v[15], bit);
// stage 7
- u[0] = _mm_add_epi32(v[0], v[8]);
- u[1] = _mm_add_epi32(v[1], v[9]);
- u[2] = _mm_add_epi32(v[2], v[10]);
- u[3] = _mm_add_epi32(v[3], v[11]);
- u[4] = _mm_add_epi32(v[4], v[12]);
- u[5] = _mm_add_epi32(v[5], v[13]);
- u[6] = _mm_add_epi32(v[6], v[14]);
- u[7] = _mm_add_epi32(v[7], v[15]);
- u[8] = _mm_sub_epi32(v[0], v[8]);
- u[9] = _mm_sub_epi32(v[1], v[9]);
- u[10] = _mm_sub_epi32(v[2], v[10]);
- u[11] = _mm_sub_epi32(v[3], v[11]);
- u[12] = _mm_sub_epi32(v[4], v[12]);
- u[13] = _mm_sub_epi32(v[5], v[13]);
- u[14] = _mm_sub_epi32(v[6], v[14]);
- u[15] = _mm_sub_epi32(v[7], v[15]);
+ u[0] = _mm_add_epi32(v[0], v[2]);
+ u[2] = _mm_sub_epi32(v[0], v[2]);
+ u[1] = _mm_add_epi32(v[1], v[3]);
+ u[3] = _mm_sub_epi32(v[1], v[3]);
+ u[4] = _mm_add_epi32(v[4], v[6]);
+ u[6] = _mm_sub_epi32(v[4], v[6]);
+ u[5] = _mm_add_epi32(v[5], v[7]);
+ u[7] = _mm_sub_epi32(v[5], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[10]);
+ u[10] = _mm_sub_epi32(v[8], v[10]);
+ u[9] = _mm_add_epi32(v[9], v[11]);
+ u[11] = _mm_sub_epi32(v[9], v[11]);
+ u[12] = _mm_add_epi32(v[12], v[14]);
+ u[14] = _mm_sub_epi32(v[12], v[14]);
+ u[13] = _mm_add_epi32(v[13], v[15]);
+ u[15] = _mm_sub_epi32(v[13], v[15]);
// stage 8
- v[0] = half_btf_sse4_1(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
- v[1] = half_btf_sse4_1(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
- v[2] = half_btf_sse4_1(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
- v[3] = half_btf_sse4_1(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
- v[4] = half_btf_sse4_1(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
- v[5] = half_btf_sse4_1(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
- v[6] = half_btf_sse4_1(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
- v[7] = half_btf_sse4_1(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
- v[8] = half_btf_sse4_1(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
- v[9] = half_btf_sse4_1(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
- v[10] = half_btf_sse4_1(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
- v[11] = half_btf_sse4_1(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
- v[12] = half_btf_sse4_1(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
- v[13] = half_btf_sse4_1(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
- v[14] = half_btf_sse4_1(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
- v[15] = half_btf_sse4_1(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);
+ v[0] = u[0];
+ v[1] = u[1];
+
+ y = _mm_mullo_epi32(u[2], cospi32);
+ x = _mm_mullo_epi32(u[3], cospi32);
+ v[2] = _mm_add_epi32(y, x);
+ v[2] = _mm_add_epi32(v[2], rnding);
+ v[2] = _mm_srai_epi32(v[2], bit);
+
+ v[3] = _mm_sub_epi32(y, x);
+ v[3] = _mm_add_epi32(v[3], rnding);
+ v[3] = _mm_srai_epi32(v[3], bit);
+
+ v[4] = u[4];
+ v[5] = u[5];
+
+ y = _mm_mullo_epi32(u[6], cospi32);
+ x = _mm_mullo_epi32(u[7], cospi32);
+ v[6] = _mm_add_epi32(y, x);
+ v[6] = _mm_add_epi32(v[6], rnding);
+ v[6] = _mm_srai_epi32(v[6], bit);
+
+ v[7] = _mm_sub_epi32(y, x);
+ v[7] = _mm_add_epi32(v[7], rnding);
+ v[7] = _mm_srai_epi32(v[7], bit);
+
+ v[8] = u[8];
+ v[9] = u[9];
+
+ y = _mm_mullo_epi32(u[10], cospi32);
+ x = _mm_mullo_epi32(u[11], cospi32);
+ v[10] = _mm_add_epi32(y, x);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[11] = _mm_sub_epi32(y, x);
+ v[11] = _mm_add_epi32(v[11], rnding);
+ v[11] = _mm_srai_epi32(v[11], bit);
+
+ v[12] = u[12];
+ v[13] = u[13];
+
+ y = _mm_mullo_epi32(u[14], cospi32);
+ x = _mm_mullo_epi32(u[15], cospi32);
+ v[14] = _mm_add_epi32(y, x);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[15] = _mm_sub_epi32(y, x);
+ v[15] = _mm_add_epi32(v[15], rnding);
+ v[15] = _mm_srai_epi32(v[15], bit);
// stage 9
- out[0 * 4 + col] = v[1];
- out[1 * 4 + col] = v[14];
- out[2 * 4 + col] = v[3];
- out[3 * 4 + col] = v[12];
- out[4 * 4 + col] = v[5];
- out[5 * 4 + col] = v[10];
- out[6 * 4 + col] = v[7];
- out[7 * 4 + col] = v[8];
- out[8 * 4 + col] = v[9];
- out[9 * 4 + col] = v[6];
- out[10 * 4 + col] = v[11];
- out[11 * 4 + col] = v[4];
- out[12 * 4 + col] = v[13];
- out[13 * 4 + col] = v[2];
- out[14 * 4 + col] = v[15];
- out[15 * 4 + col] = v[0];
+ out[0 * col_num + col] = v[0];
+ out[1 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[8]);
+ out[2 * col_num + col] = v[12];
+ out[3 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[4]);
+ out[4 * col_num + col] = v[6];
+ out[5 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[14]);
+ out[6 * col_num + col] = v[10];
+ out[7 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[2]);
+ out[8 * col_num + col] = v[3];
+ out[9 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[11]);
+ out[10 * col_num + col] = v[15];
+ out[11 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[7]);
+ out[12 * col_num + col] = v[5];
+ out[13 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[13]);
+ out[14 * col_num + col] = v[9];
+ out[15 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[1]);
}
}
diff --git a/av1/encoder/av1_fwd_txfm1d.c b/av1/encoder/av1_fwd_txfm1d.c
index 64d0316..2e66dac 100644
--- a/av1/encoder/av1_fwd_txfm1d.c
+++ b/av1/encoder/av1_fwd_txfm1d.c
@@ -30,7 +30,6 @@
}
#endif
-// TODO(angiebird): Make 1-d txfm functions static
void av1_fdct4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit,
const int8_t *stage_range) {
const int32_t size = 4;
@@ -769,15 +768,16 @@
// stage 1;
stage++;
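+ // Stage 1 writes a sign-flipped permutation of input[] into output[],
+ // so the two buffers must not alias.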
+ assert(output != input);
bf1 = output;
- bf1[0] = input[7];
- bf1[1] = input[0];
- bf1[2] = input[5];
- bf1[3] = input[2];
- bf1[4] = input[3];
- bf1[5] = input[4];
- bf1[6] = input[1];
- bf1[7] = input[6];
+ bf1[0] = input[0];
+ bf1[1] = -input[7];
+ bf1[2] = -input[3];
+ bf1[3] = input[4];
+ bf1[4] = -input[1];
+ bf1[5] = input[6];
+ bf1[6] = input[2];
+ bf1[7] = -input[5];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 2
@@ -785,28 +785,28 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = half_btf(cospi[4], bf0[0], cospi[60], bf0[1], cos_bit[stage]);
- bf1[1] = half_btf(-cospi[4], bf0[1], cospi[60], bf0[0], cos_bit[stage]);
- bf1[2] = half_btf(cospi[20], bf0[2], cospi[44], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(-cospi[20], bf0[3], cospi[44], bf0[2], cos_bit[stage]);
- bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(-cospi[36], bf0[5], cospi[28], bf0[4], cos_bit[stage]);
- bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(-cospi[52], bf0[7], cospi[12], bf0[6], cos_bit[stage]);
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit[stage]);
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[4];
- bf1[1] = bf0[1] + bf0[5];
- bf1[2] = bf0[2] + bf0[6];
- bf1[3] = bf0[3] + bf0[7];
- bf1[4] = -bf0[4] + bf0[0];
- bf1[5] = -bf0[5] + bf0[1];
- bf1[6] = -bf0[6] + bf0[2];
- bf1[7] = -bf0[7] + bf0[3];
+ bf1[0] = bf0[0] + bf0[2];
+ bf1[1] = bf0[1] + bf0[3];
+ bf1[2] = bf0[0] - bf0[2];
+ bf1[3] = bf0[1] - bf0[3];
+ bf1[4] = bf0[4] + bf0[6];
+ bf1[5] = bf0[5] + bf0[7];
+ bf1[6] = bf0[4] - bf0[6];
+ bf1[7] = bf0[5] - bf0[7];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 4
@@ -819,23 +819,23 @@
bf1[2] = bf0[2];
bf1[3] = bf0[3];
bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(-cospi[16], bf0[5], cospi[48], bf0[4], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit[stage]);
bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[48], bf0[7], cospi[16], bf0[6], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 5
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[2];
- bf1[1] = bf0[1] + bf0[3];
- bf1[2] = -bf0[2] + bf0[0];
- bf1[3] = -bf0[3] + bf0[1];
- bf1[4] = bf0[4] + bf0[6];
- bf1[5] = bf0[5] + bf0[7];
- bf1[6] = -bf0[6] + bf0[4];
- bf1[7] = -bf0[7] + bf0[5];
+ bf1[0] = bf0[0] + bf0[4];
+ bf1[1] = bf0[1] + bf0[5];
+ bf1[2] = bf0[2] + bf0[6];
+ bf1[3] = bf0[3] + bf0[7];
+ bf1[4] = bf0[0] - bf0[4];
+ bf1[5] = bf0[1] - bf0[5];
+ bf1[6] = bf0[2] - bf0[6];
+ bf1[7] = bf0[3] - bf0[7];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 6
@@ -843,28 +843,28 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(-cospi[32], bf0[3], cospi[32], bf0[2], cos_bit[stage]);
- bf1[4] = bf0[4];
- bf1[5] = bf0[5];
- bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(-cospi[32], bf0[7], cospi[32], bf0[6], cos_bit[stage]);
+ bf1[0] = half_btf(cospi[4], bf0[0], cospi[60], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(cospi[60], bf0[0], -cospi[4], bf0[1], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[20], bf0[2], cospi[44], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[44], bf0[2], -cospi[20], bf0[3], cos_bit[stage]);
+ bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[28], bf0[4], -cospi[36], bf0[5], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[12], bf0[6], -cospi[52], bf0[7], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 7
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0];
- bf1[1] = -bf0[4];
- bf1[2] = bf0[6];
- bf1[3] = -bf0[2];
- bf1[4] = bf0[3];
- bf1[5] = -bf0[7];
- bf1[6] = bf0[5];
- bf1[7] = -bf0[1];
+ bf1[0] = bf0[1];
+ bf1[1] = bf0[6];
+ bf1[2] = bf0[3];
+ bf1[3] = bf0[4];
+ bf1[4] = bf0[5];
+ bf1[5] = bf0[2];
+ bf1[6] = bf0[7];
+ bf1[7] = bf0[0];
range_check(stage, input, bf1, size, stage_range[stage]);
}
@@ -882,23 +882,24 @@
// stage 1;
stage++;
+ assert(output != input);
bf1 = output;
- bf1[0] = input[15];
- bf1[1] = input[0];
- bf1[2] = input[13];
- bf1[3] = input[2];
- bf1[4] = input[11];
- bf1[5] = input[4];
- bf1[6] = input[9];
- bf1[7] = input[6];
- bf1[8] = input[7];
- bf1[9] = input[8];
- bf1[10] = input[5];
- bf1[11] = input[10];
- bf1[12] = input[3];
- bf1[13] = input[12];
- bf1[14] = input[1];
- bf1[15] = input[14];
+ bf1[0] = input[0];
+ bf1[1] = -input[15];
+ bf1[2] = -input[7];
+ bf1[3] = input[8];
+ bf1[4] = -input[3];
+ bf1[5] = input[12];
+ bf1[6] = input[4];
+ bf1[7] = -input[11];
+ bf1[8] = -input[1];
+ bf1[9] = input[14];
+ bf1[10] = input[6];
+ bf1[11] = -input[9];
+ bf1[12] = input[2];
+ bf1[13] = -input[13];
+ bf1[14] = -input[5];
+ bf1[15] = input[10];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 2
@@ -906,44 +907,44 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit[stage]);
- bf1[1] = half_btf(-cospi[2], bf0[1], cospi[62], bf0[0], cos_bit[stage]);
- bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(-cospi[10], bf0[3], cospi[54], bf0[2], cos_bit[stage]);
- bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(-cospi[18], bf0[5], cospi[46], bf0[4], cos_bit[stage]);
- bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(-cospi[26], bf0[7], cospi[38], bf0[6], cos_bit[stage]);
- bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit[stage]);
- bf1[9] = half_btf(-cospi[34], bf0[9], cospi[30], bf0[8], cos_bit[stage]);
- bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(-cospi[42], bf0[11], cospi[22], bf0[10], cos_bit[stage]);
- bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(-cospi[50], bf0[13], cospi[14], bf0[12], cos_bit[stage]);
- bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(-cospi[58], bf0[15], cospi[6], bf0[14], cos_bit[stage]);
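+ // cospi[32] is cos(pi/4), so each pair computes (x + y) and (x - y) scaled by sqrt(2)/2.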
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit[stage]);
+ bf1[4] = bf0[4];
+ bf1[5] = bf0[5];
+ bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit[stage]);
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit[stage]);
+ bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit[stage]);
+ bf1[12] = bf0[12];
+ bf1[13] = bf0[13];
+ bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[8];
- bf1[1] = bf0[1] + bf0[9];
- bf1[2] = bf0[2] + bf0[10];
- bf1[3] = bf0[3] + bf0[11];
- bf1[4] = bf0[4] + bf0[12];
- bf1[5] = bf0[5] + bf0[13];
- bf1[6] = bf0[6] + bf0[14];
- bf1[7] = bf0[7] + bf0[15];
- bf1[8] = -bf0[8] + bf0[0];
- bf1[9] = -bf0[9] + bf0[1];
- bf1[10] = -bf0[10] + bf0[2];
- bf1[11] = -bf0[11] + bf0[3];
- bf1[12] = -bf0[12] + bf0[4];
- bf1[13] = -bf0[13] + bf0[5];
- bf1[14] = -bf0[14] + bf0[6];
- bf1[15] = -bf0[15] + bf0[7];
+ bf1[0] = bf0[0] + bf0[2];
+ bf1[1] = bf0[1] + bf0[3];
+ bf1[2] = bf0[0] - bf0[2];
+ bf1[3] = bf0[1] - bf0[3];
+ bf1[4] = bf0[4] + bf0[6];
+ bf1[5] = bf0[5] + bf0[7];
+ bf1[6] = bf0[4] - bf0[6];
+ bf1[7] = bf0[5] - bf0[7];
+ bf1[8] = bf0[8] + bf0[10];
+ bf1[9] = bf0[9] + bf0[11];
+ bf1[10] = bf0[8] - bf0[10];
+ bf1[11] = bf0[9] - bf0[11];
+ bf1[12] = bf0[12] + bf0[14];
+ bf1[13] = bf0[13] + bf0[15];
+ bf1[14] = bf0[12] - bf0[14];
+ bf1[15] = bf0[13] - bf0[15];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 4
@@ -955,85 +956,85 @@
bf1[1] = bf0[1];
bf1[2] = bf0[2];
bf1[3] = bf0[3];
+ bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit[stage]);
+ bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit[stage]);
+ bf1[8] = bf0[8];
+ bf1[9] = bf0[9];
+ bf1[10] = bf0[10];
+ bf1[11] = bf0[11];
+ bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit[stage]);
+ bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit[stage]);
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 5
+ stage++;
+ bf0 = step;
+ bf1 = output;
+ bf1[0] = bf0[0] + bf0[4];
+ bf1[1] = bf0[1] + bf0[5];
+ bf1[2] = bf0[2] + bf0[6];
+ bf1[3] = bf0[3] + bf0[7];
+ bf1[4] = bf0[0] - bf0[4];
+ bf1[5] = bf0[1] - bf0[5];
+ bf1[6] = bf0[2] - bf0[6];
+ bf1[7] = bf0[3] - bf0[7];
+ bf1[8] = bf0[8] + bf0[12];
+ bf1[9] = bf0[9] + bf0[13];
+ bf1[10] = bf0[10] + bf0[14];
+ bf1[11] = bf0[11] + bf0[15];
+ bf1[12] = bf0[8] - bf0[12];
+ bf1[13] = bf0[9] - bf0[13];
+ bf1[14] = bf0[10] - bf0[14];
+ bf1[15] = bf0[11] - bf0[15];
+ range_check(stage, input, bf1, size, stage_range[stage]);
+
+ // stage 6
+ stage++;
+ cospi = cospi_arr(cos_bit[stage]);
+ bf0 = output;
+ bf1 = step;
+ bf1[0] = bf0[0];
+ bf1[1] = bf0[1];
+ bf1[2] = bf0[2];
+ bf1[3] = bf0[3];
bf1[4] = bf0[4];
bf1[5] = bf0[5];
bf1[6] = bf0[6];
bf1[7] = bf0[7];
bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit[stage]);
- bf1[9] = half_btf(-cospi[8], bf0[9], cospi[56], bf0[8], cos_bit[stage]);
+ bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit[stage]);
bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(-cospi[40], bf0[11], cospi[24], bf0[10], cos_bit[stage]);
+ bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit[stage]);
bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(cospi[56], bf0[13], cospi[8], bf0[12], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit[stage]);
bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[24], bf0[15], cospi[40], bf0[14], cos_bit[stage]);
- range_check(stage, input, bf1, size, stage_range[stage]);
-
- // stage 5
- stage++;
- bf0 = step;
- bf1 = output;
- bf1[0] = bf0[0] + bf0[4];
- bf1[1] = bf0[1] + bf0[5];
- bf1[2] = bf0[2] + bf0[6];
- bf1[3] = bf0[3] + bf0[7];
- bf1[4] = -bf0[4] + bf0[0];
- bf1[5] = -bf0[5] + bf0[1];
- bf1[6] = -bf0[6] + bf0[2];
- bf1[7] = -bf0[7] + bf0[3];
- bf1[8] = bf0[8] + bf0[12];
- bf1[9] = bf0[9] + bf0[13];
- bf1[10] = bf0[10] + bf0[14];
- bf1[11] = bf0[11] + bf0[15];
- bf1[12] = -bf0[12] + bf0[8];
- bf1[13] = -bf0[13] + bf0[9];
- bf1[14] = -bf0[14] + bf0[10];
- bf1[15] = -bf0[15] + bf0[11];
- range_check(stage, input, bf1, size, stage_range[stage]);
-
- // stage 6
- stage++;
- cospi = cospi_arr(cos_bit[stage]);
- bf0 = output;
- bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = bf0[2];
- bf1[3] = bf0[3];
- bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit[stage]);
- bf1[5] = half_btf(-cospi[16], bf0[5], cospi[48], bf0[4], cos_bit[stage]);
- bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(cospi[48], bf0[7], cospi[16], bf0[6], cos_bit[stage]);
- bf1[8] = bf0[8];
- bf1[9] = bf0[9];
- bf1[10] = bf0[10];
- bf1[11] = bf0[11];
- bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit[stage]);
- bf1[13] = half_btf(-cospi[16], bf0[13], cospi[48], bf0[12], cos_bit[stage]);
- bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(cospi[48], bf0[15], cospi[16], bf0[14], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 7
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0] + bf0[2];
- bf1[1] = bf0[1] + bf0[3];
- bf1[2] = -bf0[2] + bf0[0];
- bf1[3] = -bf0[3] + bf0[1];
- bf1[4] = bf0[4] + bf0[6];
- bf1[5] = bf0[5] + bf0[7];
- bf1[6] = -bf0[6] + bf0[4];
- bf1[7] = -bf0[7] + bf0[5];
- bf1[8] = bf0[8] + bf0[10];
- bf1[9] = bf0[9] + bf0[11];
- bf1[10] = -bf0[10] + bf0[8];
- bf1[11] = -bf0[11] + bf0[9];
- bf1[12] = bf0[12] + bf0[14];
- bf1[13] = bf0[13] + bf0[15];
- bf1[14] = -bf0[14] + bf0[12];
- bf1[15] = -bf0[15] + bf0[13];
+ bf1[0] = bf0[0] + bf0[8];
+ bf1[1] = bf0[1] + bf0[9];
+ bf1[2] = bf0[2] + bf0[10];
+ bf1[3] = bf0[3] + bf0[11];
+ bf1[4] = bf0[4] + bf0[12];
+ bf1[5] = bf0[5] + bf0[13];
+ bf1[6] = bf0[6] + bf0[14];
+ bf1[7] = bf0[7] + bf0[15];
+ bf1[8] = bf0[0] - bf0[8];
+ bf1[9] = bf0[1] - bf0[9];
+ bf1[10] = bf0[2] - bf0[10];
+ bf1[11] = bf0[3] - bf0[11];
+ bf1[12] = bf0[4] - bf0[12];
+ bf1[13] = bf0[5] - bf0[13];
+ bf1[14] = bf0[6] - bf0[14];
+ bf1[15] = bf0[7] - bf0[15];
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 8
@@ -1041,44 +1042,44 @@
cospi = cospi_arr(cos_bit[stage]);
bf0 = output;
bf1 = step;
- bf1[0] = bf0[0];
- bf1[1] = bf0[1];
- bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit[stage]);
- bf1[3] = half_btf(-cospi[32], bf0[3], cospi[32], bf0[2], cos_bit[stage]);
- bf1[4] = bf0[4];
- bf1[5] = bf0[5];
- bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit[stage]);
- bf1[7] = half_btf(-cospi[32], bf0[7], cospi[32], bf0[6], cos_bit[stage]);
- bf1[8] = bf0[8];
- bf1[9] = bf0[9];
- bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit[stage]);
- bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[10], cos_bit[stage]);
- bf1[12] = bf0[12];
- bf1[13] = bf0[13];
- bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit[stage]);
- bf1[15] = half_btf(-cospi[32], bf0[15], cospi[32], bf0[14], cos_bit[stage]);
+ bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit[stage]);
+ bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit[stage]);
+ bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit[stage]);
+ bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit[stage]);
+ bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit[stage]);
+ bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit[stage]);
+ bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit[stage]);
+ bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit[stage]);
+ bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit[stage]);
+ bf1[9] = half_btf(cospi[30], bf0[8], -cospi[34], bf0[9], cos_bit[stage]);
+ bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit[stage]);
+ bf1[11] = half_btf(cospi[22], bf0[10], -cospi[42], bf0[11], cos_bit[stage]);
+ bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit[stage]);
+ bf1[13] = half_btf(cospi[14], bf0[12], -cospi[50], bf0[13], cos_bit[stage]);
+ bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit[stage]);
+ bf1[15] = half_btf(cospi[6], bf0[14], -cospi[58], bf0[15], cos_bit[stage]);
range_check(stage, input, bf1, size, stage_range[stage]);
// stage 9
stage++;
bf0 = step;
bf1 = output;
- bf1[0] = bf0[0];
- bf1[1] = -bf0[8];
- bf1[2] = bf0[12];
- bf1[3] = -bf0[4];
- bf1[4] = bf0[6];
- bf1[5] = -bf0[14];
- bf1[6] = bf0[10];
- bf1[7] = -bf0[2];
- bf1[8] = bf0[3];
- bf1[9] = -bf0[11];
- bf1[10] = bf0[15];
- bf1[11] = -bf0[7];
- bf1[12] = bf0[5];
- bf1[13] = -bf0[13];
- bf1[14] = bf0[9];
- bf1[15] = -bf0[1];
+ bf1[0] = bf0[1];
+ bf1[1] = bf0[14];
+ bf1[2] = bf0[3];
+ bf1[3] = bf0[12];
+ bf1[4] = bf0[5];
+ bf1[5] = bf0[10];
+ bf1[6] = bf0[7];
+ bf1[7] = bf0[8];
+ bf1[8] = bf0[9];
+ bf1[9] = bf0[6];
+ bf1[10] = bf0[11];
+ bf1[11] = bf0[4];
+ bf1[12] = bf0[13];
+ bf1[13] = bf0[2];
+ bf1[14] = bf0[15];
+ bf1[15] = bf0[0];
range_check(stage, input, bf1, size, stage_range[stage]);
}
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
index 9fc900d..d772761 100644
--- a/av1/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -622,305 +622,183 @@
static void fadst8x8_sse4_1(__m128i *in, __m128i *out, int bit) {
const int32_t *cospi = cospi_arr(bit);
- const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
- const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
- const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
- const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
- const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
- const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
- const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
- const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
- const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+ const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
+ const __m128i cospim4 = _mm_set1_epi32(-cospi[4]);
+ const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
+ const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
+ const __m128i cospim20 = _mm_set1_epi32(-cospi[20]);
+ const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
+ const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
+ const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
+ const __m128i cospim36 = _mm_set1_epi32(-cospi[36]);
+ const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
+ const __m128i cospim52 = _mm_set1_epi32(-cospi[52]);
+ const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
- const __m128i kZero = _mm_setzero_si128();
- __m128i u[8], v[8], x;
+ const __m128i zero = _mm_setzero_si128();
+ __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+ __m128i x, y;
+ int col;
- // Even 8 points: 0, 2, ..., 14
- // stage 0
- // stage 1
- // stage 2
- // (1)
- u[0] = _mm_mullo_epi32(in[14], cospi4);
- x = _mm_mullo_epi32(in[0], cospi60);
- u[0] = _mm_add_epi32(u[0], x);
- u[0] = _mm_add_epi32(u[0], rnding);
- u[0] = _mm_srai_epi32(u[0], bit);
+ // Note:
+ // Even column: 0, 2, ..., 14
+ // Odd column: 1, 3, ..., 15
+ // One even column plus one odd column constructs one row (8 coeffs);
+ // in total we have 8 rows (8x8).
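+ // i.e. row r is split across in[2 * r] and in[2 * r + 1], four coeffs each.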
+ for (col = 0; col < 2; ++col) {
+ // stage 0
+ // stage 1
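+ // (zero - x) realizes the negated inputs of the stage 1 permutation.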
+ u0 = in[2 * 0 + col];
+ u1 = _mm_sub_epi32(zero, in[2 * 7 + col]);
+ u2 = _mm_sub_epi32(zero, in[2 * 3 + col]);
+ u3 = in[2 * 4 + col];
+ u4 = _mm_sub_epi32(zero, in[2 * 1 + col]);
+ u5 = in[2 * 6 + col];
+ u6 = in[2 * 2 + col];
+ u7 = _mm_sub_epi32(zero, in[2 * 5 + col]);
- u[1] = _mm_mullo_epi32(in[14], cospi60);
- x = _mm_mullo_epi32(in[0], cospi4);
- u[1] = _mm_sub_epi32(u[1], x);
- u[1] = _mm_add_epi32(u[1], rnding);
- u[1] = _mm_srai_epi32(u[1], bit);
+ // stage 2
+ v0 = u0;
+ v1 = u1;
- // (2)
- u[2] = _mm_mullo_epi32(in[10], cospi20);
- x = _mm_mullo_epi32(in[4], cospi44);
- u[2] = _mm_add_epi32(u[2], x);
- u[2] = _mm_add_epi32(u[2], rnding);
- u[2] = _mm_srai_epi32(u[2], bit);
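+ // cospi32 butterfly: v2/v3 = round((u2 +/- u3) * cos(pi/4)).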
+ x = _mm_mullo_epi32(u2, cospi32);
+ y = _mm_mullo_epi32(u3, cospi32);
+ v2 = _mm_add_epi32(x, y);
+ v2 = _mm_add_epi32(v2, rnding);
+ v2 = _mm_srai_epi32(v2, bit);
- u[3] = _mm_mullo_epi32(in[10], cospi44);
- x = _mm_mullo_epi32(in[4], cospi20);
- u[3] = _mm_sub_epi32(u[3], x);
- u[3] = _mm_add_epi32(u[3], rnding);
- u[3] = _mm_srai_epi32(u[3], bit);
+ v3 = _mm_sub_epi32(x, y);
+ v3 = _mm_add_epi32(v3, rnding);
+ v3 = _mm_srai_epi32(v3, bit);
- // (3)
- u[4] = _mm_mullo_epi32(in[6], cospi36);
- x = _mm_mullo_epi32(in[8], cospi28);
- u[4] = _mm_add_epi32(u[4], x);
- u[4] = _mm_add_epi32(u[4], rnding);
- u[4] = _mm_srai_epi32(u[4], bit);
+ v4 = u4;
+ v5 = u5;
- u[5] = _mm_mullo_epi32(in[6], cospi28);
- x = _mm_mullo_epi32(in[8], cospi36);
- u[5] = _mm_sub_epi32(u[5], x);
- u[5] = _mm_add_epi32(u[5], rnding);
- u[5] = _mm_srai_epi32(u[5], bit);
+ x = _mm_mullo_epi32(u6, cospi32);
+ y = _mm_mullo_epi32(u7, cospi32);
+ v6 = _mm_add_epi32(x, y);
+ v6 = _mm_add_epi32(v6, rnding);
+ v6 = _mm_srai_epi32(v6, bit);
- // (4)
- u[6] = _mm_mullo_epi32(in[2], cospi52);
- x = _mm_mullo_epi32(in[12], cospi12);
- u[6] = _mm_add_epi32(u[6], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
+ v7 = _mm_sub_epi32(x, y);
+ v7 = _mm_add_epi32(v7, rnding);
+ v7 = _mm_srai_epi32(v7, bit);
- u[7] = _mm_mullo_epi32(in[2], cospi12);
- x = _mm_mullo_epi32(in[12], cospi52);
- u[7] = _mm_sub_epi32(u[7], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
+ // stage 3
+ u0 = _mm_add_epi32(v0, v2);
+ u1 = _mm_add_epi32(v1, v3);
+ u2 = _mm_sub_epi32(v0, v2);
+ u3 = _mm_sub_epi32(v1, v3);
+ u4 = _mm_add_epi32(v4, v6);
+ u5 = _mm_add_epi32(v5, v7);
+ u6 = _mm_sub_epi32(v4, v6);
+ u7 = _mm_sub_epi32(v5, v7);
- // stage 3
- v[0] = _mm_add_epi32(u[0], u[4]);
- v[4] = _mm_sub_epi32(u[0], u[4]);
- v[1] = _mm_add_epi32(u[1], u[5]);
- v[5] = _mm_sub_epi32(u[1], u[5]);
- v[2] = _mm_add_epi32(u[2], u[6]);
- v[6] = _mm_sub_epi32(u[2], u[6]);
- v[3] = _mm_add_epi32(u[3], u[7]);
- v[7] = _mm_sub_epi32(u[3], u[7]);
+ // stage 4
+ v0 = u0;
+ v1 = u1;
+ v2 = u2;
+ v3 = u3;
- // stage 4
- u[0] = v[0];
- u[1] = v[1];
- u[2] = v[2];
- u[3] = v[3];
+ x = _mm_mullo_epi32(u4, cospi16);
+ y = _mm_mullo_epi32(u5, cospi48);
+ v4 = _mm_add_epi32(x, y);
+ v4 = _mm_add_epi32(v4, rnding);
+ v4 = _mm_srai_epi32(v4, bit);
- u[4] = _mm_mullo_epi32(v[4], cospi16);
- x = _mm_mullo_epi32(v[5], cospi48);
- u[4] = _mm_add_epi32(u[4], x);
- u[4] = _mm_add_epi32(u[4], rnding);
- u[4] = _mm_srai_epi32(u[4], bit);
+ x = _mm_mullo_epi32(u4, cospi48);
+ y = _mm_mullo_epi32(u5, cospim16);
+ v5 = _mm_add_epi32(x, y);
+ v5 = _mm_add_epi32(v5, rnding);
+ v5 = _mm_srai_epi32(v5, bit);
- u[5] = _mm_mullo_epi32(v[4], cospi48);
- x = _mm_mullo_epi32(v[5], cospi16);
- u[5] = _mm_sub_epi32(u[5], x);
- u[5] = _mm_add_epi32(u[5], rnding);
- u[5] = _mm_srai_epi32(u[5], bit);
+ x = _mm_mullo_epi32(u6, cospim48);
+ y = _mm_mullo_epi32(u7, cospi16);
+ v6 = _mm_add_epi32(x, y);
+ v6 = _mm_add_epi32(v6, rnding);
+ v6 = _mm_srai_epi32(v6, bit);
- u[6] = _mm_mullo_epi32(v[6], cospim48);
- x = _mm_mullo_epi32(v[7], cospi16);
- u[6] = _mm_add_epi32(u[6], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
+ x = _mm_mullo_epi32(u6, cospi16);
+ y = _mm_mullo_epi32(u7, cospi48);
+ v7 = _mm_add_epi32(x, y);
+ v7 = _mm_add_epi32(v7, rnding);
+ v7 = _mm_srai_epi32(v7, bit);
- u[7] = _mm_mullo_epi32(v[6], cospi16);
- x = _mm_mullo_epi32(v[7], cospim48);
- u[7] = _mm_sub_epi32(u[7], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
+ // stage 5
+ u0 = _mm_add_epi32(v0, v4);
+ u1 = _mm_add_epi32(v1, v5);
+ u2 = _mm_add_epi32(v2, v6);
+ u3 = _mm_add_epi32(v3, v7);
+ u4 = _mm_sub_epi32(v0, v4);
+ u5 = _mm_sub_epi32(v1, v5);
+ u6 = _mm_sub_epi32(v2, v6);
+ u7 = _mm_sub_epi32(v3, v7);
- // stage 5
- v[0] = _mm_add_epi32(u[0], u[2]);
- v[2] = _mm_sub_epi32(u[0], u[2]);
- v[1] = _mm_add_epi32(u[1], u[3]);
- v[3] = _mm_sub_epi32(u[1], u[3]);
- v[4] = _mm_add_epi32(u[4], u[6]);
- v[6] = _mm_sub_epi32(u[4], u[6]);
- v[5] = _mm_add_epi32(u[5], u[7]);
- v[7] = _mm_sub_epi32(u[5], u[7]);
+ // stage 6
+ x = _mm_mullo_epi32(u0, cospi4);
+ y = _mm_mullo_epi32(u1, cospi60);
+ v0 = _mm_add_epi32(x, y);
+ v0 = _mm_add_epi32(v0, rnding);
+ v0 = _mm_srai_epi32(v0, bit);
- // stage 6
- u[0] = v[0];
- u[1] = v[1];
- u[4] = v[4];
- u[5] = v[5];
+ x = _mm_mullo_epi32(u0, cospi60);
+ y = _mm_mullo_epi32(u1, cospim4);
+ v1 = _mm_add_epi32(x, y);
+ v1 = _mm_add_epi32(v1, rnding);
+ v1 = _mm_srai_epi32(v1, bit);
- v[0] = _mm_mullo_epi32(v[2], cospi32);
- x = _mm_mullo_epi32(v[3], cospi32);
- u[2] = _mm_add_epi32(v[0], x);
- u[2] = _mm_add_epi32(u[2], rnding);
- u[2] = _mm_srai_epi32(u[2], bit);
+ x = _mm_mullo_epi32(u2, cospi20);
+ y = _mm_mullo_epi32(u3, cospi44);
+ v2 = _mm_add_epi32(x, y);
+ v2 = _mm_add_epi32(v2, rnding);
+ v2 = _mm_srai_epi32(v2, bit);
- u[3] = _mm_sub_epi32(v[0], x);
- u[3] = _mm_add_epi32(u[3], rnding);
- u[3] = _mm_srai_epi32(u[3], bit);
+ x = _mm_mullo_epi32(u2, cospi44);
+ y = _mm_mullo_epi32(u3, cospim20);
+ v3 = _mm_add_epi32(x, y);
+ v3 = _mm_add_epi32(v3, rnding);
+ v3 = _mm_srai_epi32(v3, bit);
- v[0] = _mm_mullo_epi32(v[6], cospi32);
- x = _mm_mullo_epi32(v[7], cospi32);
- u[6] = _mm_add_epi32(v[0], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
+ x = _mm_mullo_epi32(u4, cospi36);
+ y = _mm_mullo_epi32(u5, cospi28);
+ v4 = _mm_add_epi32(x, y);
+ v4 = _mm_add_epi32(v4, rnding);
+ v4 = _mm_srai_epi32(v4, bit);
- u[7] = _mm_sub_epi32(v[0], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
+ x = _mm_mullo_epi32(u4, cospi28);
+ y = _mm_mullo_epi32(u5, cospim36);
+ v5 = _mm_add_epi32(x, y);
+ v5 = _mm_add_epi32(v5, rnding);
+ v5 = _mm_srai_epi32(v5, bit);
- // stage 7
- out[0] = u[0];
- out[2] = _mm_sub_epi32(kZero, u[4]);
- out[4] = u[6];
- out[6] = _mm_sub_epi32(kZero, u[2]);
- out[8] = u[3];
- out[10] = _mm_sub_epi32(kZero, u[7]);
- out[12] = u[5];
- out[14] = _mm_sub_epi32(kZero, u[1]);
+ x = _mm_mullo_epi32(u6, cospi52);
+ y = _mm_mullo_epi32(u7, cospi12);
+ v6 = _mm_add_epi32(x, y);
+ v6 = _mm_add_epi32(v6, rnding);
+ v6 = _mm_srai_epi32(v6, bit);
- // Odd 8 points: 1, 3, ..., 15
- // stage 0
- // stage 1
- // stage 2
- // (1)
- u[0] = _mm_mullo_epi32(in[15], cospi4);
- x = _mm_mullo_epi32(in[1], cospi60);
- u[0] = _mm_add_epi32(u[0], x);
- u[0] = _mm_add_epi32(u[0], rnding);
- u[0] = _mm_srai_epi32(u[0], bit);
+ x = _mm_mullo_epi32(u6, cospi12);
+ y = _mm_mullo_epi32(u7, cospim52);
+ v7 = _mm_add_epi32(x, y);
+ v7 = _mm_add_epi32(v7, rnding);
+ v7 = _mm_srai_epi32(v7, bit);
- u[1] = _mm_mullo_epi32(in[15], cospi60);
- x = _mm_mullo_epi32(in[1], cospi4);
- u[1] = _mm_sub_epi32(u[1], x);
- u[1] = _mm_add_epi32(u[1], rnding);
- u[1] = _mm_srai_epi32(u[1], bit);
-
- // (2)
- u[2] = _mm_mullo_epi32(in[11], cospi20);
- x = _mm_mullo_epi32(in[5], cospi44);
- u[2] = _mm_add_epi32(u[2], x);
- u[2] = _mm_add_epi32(u[2], rnding);
- u[2] = _mm_srai_epi32(u[2], bit);
-
- u[3] = _mm_mullo_epi32(in[11], cospi44);
- x = _mm_mullo_epi32(in[5], cospi20);
- u[3] = _mm_sub_epi32(u[3], x);
- u[3] = _mm_add_epi32(u[3], rnding);
- u[3] = _mm_srai_epi32(u[3], bit);
-
- // (3)
- u[4] = _mm_mullo_epi32(in[7], cospi36);
- x = _mm_mullo_epi32(in[9], cospi28);
- u[4] = _mm_add_epi32(u[4], x);
- u[4] = _mm_add_epi32(u[4], rnding);
- u[4] = _mm_srai_epi32(u[4], bit);
-
- u[5] = _mm_mullo_epi32(in[7], cospi28);
- x = _mm_mullo_epi32(in[9], cospi36);
- u[5] = _mm_sub_epi32(u[5], x);
- u[5] = _mm_add_epi32(u[5], rnding);
- u[5] = _mm_srai_epi32(u[5], bit);
-
- // (4)
- u[6] = _mm_mullo_epi32(in[3], cospi52);
- x = _mm_mullo_epi32(in[13], cospi12);
- u[6] = _mm_add_epi32(u[6], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
-
- u[7] = _mm_mullo_epi32(in[3], cospi12);
- x = _mm_mullo_epi32(in[13], cospi52);
- u[7] = _mm_sub_epi32(u[7], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
-
- // stage 3
- v[0] = _mm_add_epi32(u[0], u[4]);
- v[4] = _mm_sub_epi32(u[0], u[4]);
- v[1] = _mm_add_epi32(u[1], u[5]);
- v[5] = _mm_sub_epi32(u[1], u[5]);
- v[2] = _mm_add_epi32(u[2], u[6]);
- v[6] = _mm_sub_epi32(u[2], u[6]);
- v[3] = _mm_add_epi32(u[3], u[7]);
- v[7] = _mm_sub_epi32(u[3], u[7]);
-
- // stage 4
- u[0] = v[0];
- u[1] = v[1];
- u[2] = v[2];
- u[3] = v[3];
-
- u[4] = _mm_mullo_epi32(v[4], cospi16);
- x = _mm_mullo_epi32(v[5], cospi48);
- u[4] = _mm_add_epi32(u[4], x);
- u[4] = _mm_add_epi32(u[4], rnding);
- u[4] = _mm_srai_epi32(u[4], bit);
-
- u[5] = _mm_mullo_epi32(v[4], cospi48);
- x = _mm_mullo_epi32(v[5], cospi16);
- u[5] = _mm_sub_epi32(u[5], x);
- u[5] = _mm_add_epi32(u[5], rnding);
- u[5] = _mm_srai_epi32(u[5], bit);
-
- u[6] = _mm_mullo_epi32(v[6], cospim48);
- x = _mm_mullo_epi32(v[7], cospi16);
- u[6] = _mm_add_epi32(u[6], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
-
- u[7] = _mm_mullo_epi32(v[6], cospi16);
- x = _mm_mullo_epi32(v[7], cospim48);
- u[7] = _mm_sub_epi32(u[7], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
-
- // stage 5
- v[0] = _mm_add_epi32(u[0], u[2]);
- v[2] = _mm_sub_epi32(u[0], u[2]);
- v[1] = _mm_add_epi32(u[1], u[3]);
- v[3] = _mm_sub_epi32(u[1], u[3]);
- v[4] = _mm_add_epi32(u[4], u[6]);
- v[6] = _mm_sub_epi32(u[4], u[6]);
- v[5] = _mm_add_epi32(u[5], u[7]);
- v[7] = _mm_sub_epi32(u[5], u[7]);
-
- // stage 6
- u[0] = v[0];
- u[1] = v[1];
- u[4] = v[4];
- u[5] = v[5];
-
- v[0] = _mm_mullo_epi32(v[2], cospi32);
- x = _mm_mullo_epi32(v[3], cospi32);
- u[2] = _mm_add_epi32(v[0], x);
- u[2] = _mm_add_epi32(u[2], rnding);
- u[2] = _mm_srai_epi32(u[2], bit);
-
- u[3] = _mm_sub_epi32(v[0], x);
- u[3] = _mm_add_epi32(u[3], rnding);
- u[3] = _mm_srai_epi32(u[3], bit);
-
- v[0] = _mm_mullo_epi32(v[6], cospi32);
- x = _mm_mullo_epi32(v[7], cospi32);
- u[6] = _mm_add_epi32(v[0], x);
- u[6] = _mm_add_epi32(u[6], rnding);
- u[6] = _mm_srai_epi32(u[6], bit);
-
- u[7] = _mm_sub_epi32(v[0], x);
- u[7] = _mm_add_epi32(u[7], rnding);
- u[7] = _mm_srai_epi32(u[7], bit);
-
- // stage 7
- out[1] = u[0];
- out[3] = _mm_sub_epi32(kZero, u[4]);
- out[5] = u[6];
- out[7] = _mm_sub_epi32(kZero, u[2]);
- out[9] = u[3];
- out[11] = _mm_sub_epi32(kZero, u[7]);
- out[13] = u[5];
- out[15] = _mm_sub_epi32(kZero, u[1]);
+ // stage 7
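+ // out[2 * r + col] holds four coeffs of output row r, matching the input layout.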
+ out[2 * 0 + col] = v1;
+ out[2 * 1 + col] = v6;
+ out[2 * 2 + col] = v3;
+ out[2 * 3 + col] = v4;
+ out[2 * 4 + col] = v5;
+ out[2 * 5 + col] = v2;
+ out[2 * 6 + col] = v7;
+ out[2 * 7 + col] = v0;
+ }
}
void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
@@ -1398,379 +1276,247 @@
static void fadst16x16_sse4_1(__m128i *in, __m128i *out, int bit) {
const int32_t *cospi = cospi_arr(bit);
- const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
- const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
- const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
- const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
- const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
- const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
- const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
- const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
- const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
- const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
- const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
- const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
- const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
- const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
- const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
- const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
- const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
- const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
- const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
- const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
- const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
- const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
- const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+ const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+ const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+ const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
+ const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
+ const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+ const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
+ const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
+ const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+ const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
+ const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
+ const __m128i cospim2 = _mm_set1_epi32(-cospi[2]);
+ const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
+ const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
+ const __m128i cospim10 = _mm_set1_epi32(-cospi[10]);
+ const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
+ const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
+ const __m128i cospim18 = _mm_set1_epi32(-cospi[18]);
+ const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
+ const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
+ const __m128i cospim26 = _mm_set1_epi32(-cospi[26]);
+ const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
+ const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
+ const __m128i cospim34 = _mm_set1_epi32(-cospi[34]);
+ const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
+ const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
+ const __m128i cospim42 = _mm_set1_epi32(-cospi[42]);
+ const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
+ const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
+ const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
+ const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
+ const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
+ const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+ const __m128i zero = _mm_setzero_si128();
+
__m128i u[16], v[16], x, y;
- const int col_num = 4;
int col;
- // Calculate the column 0, 1, 2, 3
- for (col = 0; col < col_num; ++col) {
+ for (col = 0; col < 4; ++col) {
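+ // each pass transforms four lanes in parallel; in[r * 4 + col] supplies input r of those lanes.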
// stage 0
// stage 1
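+ // (zero - x) implements the sign flips of the stage 1 permutation.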
+ u[0] = in[0 * 4 + col];
+ u[1] = _mm_sub_epi32(zero, in[15 * 4 + col]);
+ u[2] = _mm_sub_epi32(zero, in[7 * 4 + col]);
+ u[3] = in[8 * 4 + col];
+ u[4] = _mm_sub_epi32(zero, in[3 * 4 + col]);
+ u[5] = in[12 * 4 + col];
+ u[6] = in[4 * 4 + col];
+ u[7] = _mm_sub_epi32(zero, in[11 * 4 + col]);
+ u[8] = _mm_sub_epi32(zero, in[1 * 4 + col]);
+ u[9] = in[14 * 4 + col];
+ u[10] = in[6 * 4 + col];
+ u[11] = _mm_sub_epi32(zero, in[9 * 4 + col]);
+ u[12] = in[2 * 4 + col];
+ u[13] = _mm_sub_epi32(zero, in[13 * 4 + col]);
+ u[14] = _mm_sub_epi32(zero, in[5 * 4 + col]);
+ u[15] = in[10 * 4 + col];
+
// stage 2
- v[0] = _mm_mullo_epi32(in[15 * col_num + col], cospi2);
- x = _mm_mullo_epi32(in[0 * col_num + col], cospi62);
- v[0] = _mm_add_epi32(v[0], x);
- v[0] = _mm_add_epi32(v[0], rnding);
- v[0] = _mm_srai_epi32(v[0], bit);
+ v[0] = u[0];
+ v[1] = u[1];
- v[1] = _mm_mullo_epi32(in[15 * col_num + col], cospi62);
- x = _mm_mullo_epi32(in[0 * col_num + col], cospi2);
- v[1] = _mm_sub_epi32(v[1], x);
- v[1] = _mm_add_epi32(v[1], rnding);
- v[1] = _mm_srai_epi32(v[1], bit);
-
- v[2] = _mm_mullo_epi32(in[13 * col_num + col], cospi10);
- x = _mm_mullo_epi32(in[2 * col_num + col], cospi54);
- v[2] = _mm_add_epi32(v[2], x);
+ x = _mm_mullo_epi32(u[2], cospi32);
+ y = _mm_mullo_epi32(u[3], cospi32);
+ v[2] = _mm_add_epi32(x, y);
v[2] = _mm_add_epi32(v[2], rnding);
v[2] = _mm_srai_epi32(v[2], bit);
- v[3] = _mm_mullo_epi32(in[13 * col_num + col], cospi54);
- x = _mm_mullo_epi32(in[2 * col_num + col], cospi10);
- v[3] = _mm_sub_epi32(v[3], x);
+ v[3] = _mm_sub_epi32(x, y);
v[3] = _mm_add_epi32(v[3], rnding);
v[3] = _mm_srai_epi32(v[3], bit);
- v[4] = _mm_mullo_epi32(in[11 * col_num + col], cospi18);
- x = _mm_mullo_epi32(in[4 * col_num + col], cospi46);
- v[4] = _mm_add_epi32(v[4], x);
- v[4] = _mm_add_epi32(v[4], rnding);
- v[4] = _mm_srai_epi32(v[4], bit);
+ v[4] = u[4];
+ v[5] = u[5];
- v[5] = _mm_mullo_epi32(in[11 * col_num + col], cospi46);
- x = _mm_mullo_epi32(in[4 * col_num + col], cospi18);
- v[5] = _mm_sub_epi32(v[5], x);
- v[5] = _mm_add_epi32(v[5], rnding);
- v[5] = _mm_srai_epi32(v[5], bit);
-
- v[6] = _mm_mullo_epi32(in[9 * col_num + col], cospi26);
- x = _mm_mullo_epi32(in[6 * col_num + col], cospi38);
- v[6] = _mm_add_epi32(v[6], x);
+ x = _mm_mullo_epi32(u[6], cospi32);
+ y = _mm_mullo_epi32(u[7], cospi32);
+ v[6] = _mm_add_epi32(x, y);
v[6] = _mm_add_epi32(v[6], rnding);
v[6] = _mm_srai_epi32(v[6], bit);
- v[7] = _mm_mullo_epi32(in[9 * col_num + col], cospi38);
- x = _mm_mullo_epi32(in[6 * col_num + col], cospi26);
- v[7] = _mm_sub_epi32(v[7], x);
+ v[7] = _mm_sub_epi32(x, y);
v[7] = _mm_add_epi32(v[7], rnding);
v[7] = _mm_srai_epi32(v[7], bit);
- v[8] = _mm_mullo_epi32(in[7 * col_num + col], cospi34);
- x = _mm_mullo_epi32(in[8 * col_num + col], cospi30);
- v[8] = _mm_add_epi32(v[8], x);
- v[8] = _mm_add_epi32(v[8], rnding);
- v[8] = _mm_srai_epi32(v[8], bit);
+ v[8] = u[8];
+ v[9] = u[9];
- v[9] = _mm_mullo_epi32(in[7 * col_num + col], cospi30);
- x = _mm_mullo_epi32(in[8 * col_num + col], cospi34);
- v[9] = _mm_sub_epi32(v[9], x);
- v[9] = _mm_add_epi32(v[9], rnding);
- v[9] = _mm_srai_epi32(v[9], bit);
-
- v[10] = _mm_mullo_epi32(in[5 * col_num + col], cospi42);
- x = _mm_mullo_epi32(in[10 * col_num + col], cospi22);
- v[10] = _mm_add_epi32(v[10], x);
+ x = _mm_mullo_epi32(u[10], cospi32);
+ y = _mm_mullo_epi32(u[11], cospi32);
+ v[10] = _mm_add_epi32(x, y);
v[10] = _mm_add_epi32(v[10], rnding);
v[10] = _mm_srai_epi32(v[10], bit);
- v[11] = _mm_mullo_epi32(in[5 * col_num + col], cospi22);
- x = _mm_mullo_epi32(in[10 * col_num + col], cospi42);
- v[11] = _mm_sub_epi32(v[11], x);
+ v[11] = _mm_sub_epi32(x, y);
v[11] = _mm_add_epi32(v[11], rnding);
v[11] = _mm_srai_epi32(v[11], bit);
- v[12] = _mm_mullo_epi32(in[3 * col_num + col], cospi50);
- x = _mm_mullo_epi32(in[12 * col_num + col], cospi14);
- v[12] = _mm_add_epi32(v[12], x);
- v[12] = _mm_add_epi32(v[12], rnding);
- v[12] = _mm_srai_epi32(v[12], bit);
+ v[12] = u[12];
+ v[13] = u[13];
- v[13] = _mm_mullo_epi32(in[3 * col_num + col], cospi14);
- x = _mm_mullo_epi32(in[12 * col_num + col], cospi50);
- v[13] = _mm_sub_epi32(v[13], x);
- v[13] = _mm_add_epi32(v[13], rnding);
- v[13] = _mm_srai_epi32(v[13], bit);
-
- v[14] = _mm_mullo_epi32(in[1 * col_num + col], cospi58);
- x = _mm_mullo_epi32(in[14 * col_num + col], cospi6);
- v[14] = _mm_add_epi32(v[14], x);
+ x = _mm_mullo_epi32(u[14], cospi32);
+ y = _mm_mullo_epi32(u[15], cospi32);
+ v[14] = _mm_add_epi32(x, y);
v[14] = _mm_add_epi32(v[14], rnding);
v[14] = _mm_srai_epi32(v[14], bit);
- v[15] = _mm_mullo_epi32(in[1 * col_num + col], cospi6);
- x = _mm_mullo_epi32(in[14 * col_num + col], cospi58);
- v[15] = _mm_sub_epi32(v[15], x);
+ v[15] = _mm_sub_epi32(x, y);
v[15] = _mm_add_epi32(v[15], rnding);
v[15] = _mm_srai_epi32(v[15], bit);
// stage 3
- u[0] = _mm_add_epi32(v[0], v[8]);
- u[8] = _mm_sub_epi32(v[0], v[8]);
- u[1] = _mm_add_epi32(v[1], v[9]);
- u[9] = _mm_sub_epi32(v[1], v[9]);
- u[2] = _mm_add_epi32(v[2], v[10]);
- u[10] = _mm_sub_epi32(v[2], v[10]);
- u[3] = _mm_add_epi32(v[3], v[11]);
- u[11] = _mm_sub_epi32(v[3], v[11]);
- u[4] = _mm_add_epi32(v[4], v[12]);
- u[12] = _mm_sub_epi32(v[4], v[12]);
- u[5] = _mm_add_epi32(v[5], v[13]);
- u[13] = _mm_sub_epi32(v[5], v[13]);
- u[6] = _mm_add_epi32(v[6], v[14]);
- u[14] = _mm_sub_epi32(v[6], v[14]);
- u[7] = _mm_add_epi32(v[7], v[15]);
- u[15] = _mm_sub_epi32(v[7], v[15]);
+ u[0] = _mm_add_epi32(v[0], v[2]);
+ u[1] = _mm_add_epi32(v[1], v[3]);
+ u[2] = _mm_sub_epi32(v[0], v[2]);
+ u[3] = _mm_sub_epi32(v[1], v[3]);
+ u[4] = _mm_add_epi32(v[4], v[6]);
+ u[5] = _mm_add_epi32(v[5], v[7]);
+ u[6] = _mm_sub_epi32(v[4], v[6]);
+ u[7] = _mm_sub_epi32(v[5], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[10]);
+ u[9] = _mm_add_epi32(v[9], v[11]);
+ u[10] = _mm_sub_epi32(v[8], v[10]);
+ u[11] = _mm_sub_epi32(v[9], v[11]);
+ u[12] = _mm_add_epi32(v[12], v[14]);
+ u[13] = _mm_add_epi32(v[13], v[15]);
+ u[14] = _mm_sub_epi32(v[12], v[14]);
+ u[15] = _mm_sub_epi32(v[13], v[15]);
// stage 4
v[0] = u[0];
v[1] = u[1];
v[2] = u[2];
v[3] = u[3];
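+ // half_btf_sse4_1 computes (w0 * n0 + w1 * n1 + rnding) >> bit per lane.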
+ v[4] = half_btf_sse4_1(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
+ v[5] = half_btf_sse4_1(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
+ v[6] = half_btf_sse4_1(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
+ v[7] = half_btf_sse4_1(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
+ v[8] = u[8];
+ v[9] = u[9];
+ v[10] = u[10];
+ v[11] = u[11];
+ v[12] = half_btf_sse4_1(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);
+
+ // stage 5
+ u[0] = _mm_add_epi32(v[0], v[4]);
+ u[1] = _mm_add_epi32(v[1], v[5]);
+ u[2] = _mm_add_epi32(v[2], v[6]);
+ u[3] = _mm_add_epi32(v[3], v[7]);
+ u[4] = _mm_sub_epi32(v[0], v[4]);
+ u[5] = _mm_sub_epi32(v[1], v[5]);
+ u[6] = _mm_sub_epi32(v[2], v[6]);
+ u[7] = _mm_sub_epi32(v[3], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[12]);
+ u[9] = _mm_add_epi32(v[9], v[13]);
+ u[10] = _mm_add_epi32(v[10], v[14]);
+ u[11] = _mm_add_epi32(v[11], v[15]);
+ u[12] = _mm_sub_epi32(v[8], v[12]);
+ u[13] = _mm_sub_epi32(v[9], v[13]);
+ u[14] = _mm_sub_epi32(v[10], v[14]);
+ u[15] = _mm_sub_epi32(v[11], v[15]);
+
+ // stage 6
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
v[4] = u[4];
v[5] = u[5];
v[6] = u[6];
v[7] = u[7];
-
- v[8] = _mm_mullo_epi32(u[8], cospi8);
- x = _mm_mullo_epi32(u[9], cospi56);
- v[8] = _mm_add_epi32(v[8], x);
- v[8] = _mm_add_epi32(v[8], rnding);
- v[8] = _mm_srai_epi32(v[8], bit);
-
- v[9] = _mm_mullo_epi32(u[8], cospi56);
- x = _mm_mullo_epi32(u[9], cospi8);
- v[9] = _mm_sub_epi32(v[9], x);
- v[9] = _mm_add_epi32(v[9], rnding);
- v[9] = _mm_srai_epi32(v[9], bit);
-
- v[10] = _mm_mullo_epi32(u[10], cospi40);
- x = _mm_mullo_epi32(u[11], cospi24);
- v[10] = _mm_add_epi32(v[10], x);
- v[10] = _mm_add_epi32(v[10], rnding);
- v[10] = _mm_srai_epi32(v[10], bit);
-
- v[11] = _mm_mullo_epi32(u[10], cospi24);
- x = _mm_mullo_epi32(u[11], cospi40);
- v[11] = _mm_sub_epi32(v[11], x);
- v[11] = _mm_add_epi32(v[11], rnding);
- v[11] = _mm_srai_epi32(v[11], bit);
-
- v[12] = _mm_mullo_epi32(u[12], cospim56);
- x = _mm_mullo_epi32(u[13], cospi8);
- v[12] = _mm_add_epi32(v[12], x);
- v[12] = _mm_add_epi32(v[12], rnding);
- v[12] = _mm_srai_epi32(v[12], bit);
-
- v[13] = _mm_mullo_epi32(u[12], cospi8);
- x = _mm_mullo_epi32(u[13], cospim56);
- v[13] = _mm_sub_epi32(v[13], x);
- v[13] = _mm_add_epi32(v[13], rnding);
- v[13] = _mm_srai_epi32(v[13], bit);
-
- v[14] = _mm_mullo_epi32(u[14], cospim24);
- x = _mm_mullo_epi32(u[15], cospi40);
- v[14] = _mm_add_epi32(v[14], x);
- v[14] = _mm_add_epi32(v[14], rnding);
- v[14] = _mm_srai_epi32(v[14], bit);
-
- v[15] = _mm_mullo_epi32(u[14], cospi40);
- x = _mm_mullo_epi32(u[15], cospim24);
- v[15] = _mm_sub_epi32(v[15], x);
- v[15] = _mm_add_epi32(v[15], rnding);
- v[15] = _mm_srai_epi32(v[15], bit);
-
- // stage 5
- u[0] = _mm_add_epi32(v[0], v[4]);
- u[4] = _mm_sub_epi32(v[0], v[4]);
- u[1] = _mm_add_epi32(v[1], v[5]);
- u[5] = _mm_sub_epi32(v[1], v[5]);
- u[2] = _mm_add_epi32(v[2], v[6]);
- u[6] = _mm_sub_epi32(v[2], v[6]);
- u[3] = _mm_add_epi32(v[3], v[7]);
- u[7] = _mm_sub_epi32(v[3], v[7]);
- u[8] = _mm_add_epi32(v[8], v[12]);
- u[12] = _mm_sub_epi32(v[8], v[12]);
- u[9] = _mm_add_epi32(v[9], v[13]);
- u[13] = _mm_sub_epi32(v[9], v[13]);
- u[10] = _mm_add_epi32(v[10], v[14]);
- u[14] = _mm_sub_epi32(v[10], v[14]);
- u[11] = _mm_add_epi32(v[11], v[15]);
- u[15] = _mm_sub_epi32(v[11], v[15]);
-
- // stage 6
- v[0] = u[0];
- v[1] = u[1];
- v[2] = u[2];
- v[3] = u[3];
-
- v[4] = _mm_mullo_epi32(u[4], cospi16);
- x = _mm_mullo_epi32(u[5], cospi48);
- v[4] = _mm_add_epi32(v[4], x);
- v[4] = _mm_add_epi32(v[4], rnding);
- v[4] = _mm_srai_epi32(v[4], bit);
-
- v[5] = _mm_mullo_epi32(u[4], cospi48);
- x = _mm_mullo_epi32(u[5], cospi16);
- v[5] = _mm_sub_epi32(v[5], x);
- v[5] = _mm_add_epi32(v[5], rnding);
- v[5] = _mm_srai_epi32(v[5], bit);
-
- v[6] = _mm_mullo_epi32(u[6], cospim48);
- x = _mm_mullo_epi32(u[7], cospi16);
- v[6] = _mm_add_epi32(v[6], x);
- v[6] = _mm_add_epi32(v[6], rnding);
- v[6] = _mm_srai_epi32(v[6], bit);
-
- v[7] = _mm_mullo_epi32(u[6], cospi16);
- x = _mm_mullo_epi32(u[7], cospim48);
- v[7] = _mm_sub_epi32(v[7], x);
- v[7] = _mm_add_epi32(v[7], rnding);
- v[7] = _mm_srai_epi32(v[7], bit);
-
- v[8] = u[8];
- v[9] = u[9];
- v[10] = u[10];
- v[11] = u[11];
-
- v[12] = _mm_mullo_epi32(u[12], cospi16);
- x = _mm_mullo_epi32(u[13], cospi48);
- v[12] = _mm_add_epi32(v[12], x);
- v[12] = _mm_add_epi32(v[12], rnding);
- v[12] = _mm_srai_epi32(v[12], bit);
-
- v[13] = _mm_mullo_epi32(u[12], cospi48);
- x = _mm_mullo_epi32(u[13], cospi16);
- v[13] = _mm_sub_epi32(v[13], x);
- v[13] = _mm_add_epi32(v[13], rnding);
- v[13] = _mm_srai_epi32(v[13], bit);
-
- v[14] = _mm_mullo_epi32(u[14], cospim48);
- x = _mm_mullo_epi32(u[15], cospi16);
- v[14] = _mm_add_epi32(v[14], x);
- v[14] = _mm_add_epi32(v[14], rnding);
- v[14] = _mm_srai_epi32(v[14], bit);
-
- v[15] = _mm_mullo_epi32(u[14], cospi16);
- x = _mm_mullo_epi32(u[15], cospim48);
- v[15] = _mm_sub_epi32(v[15], x);
- v[15] = _mm_add_epi32(v[15], rnding);
- v[15] = _mm_srai_epi32(v[15], bit);
+ v[8] = half_btf_sse4_1(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
+ v[9] = half_btf_sse4_1(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
+ v[10] = half_btf_sse4_1(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
+ v[11] = half_btf_sse4_1(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
+ v[12] = half_btf_sse4_1(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);
// stage 7
- u[0] = _mm_add_epi32(v[0], v[2]);
- u[2] = _mm_sub_epi32(v[0], v[2]);
- u[1] = _mm_add_epi32(v[1], v[3]);
- u[3] = _mm_sub_epi32(v[1], v[3]);
- u[4] = _mm_add_epi32(v[4], v[6]);
- u[6] = _mm_sub_epi32(v[4], v[6]);
- u[5] = _mm_add_epi32(v[5], v[7]);
- u[7] = _mm_sub_epi32(v[5], v[7]);
- u[8] = _mm_add_epi32(v[8], v[10]);
- u[10] = _mm_sub_epi32(v[8], v[10]);
- u[9] = _mm_add_epi32(v[9], v[11]);
- u[11] = _mm_sub_epi32(v[9], v[11]);
- u[12] = _mm_add_epi32(v[12], v[14]);
- u[14] = _mm_sub_epi32(v[12], v[14]);
- u[13] = _mm_add_epi32(v[13], v[15]);
- u[15] = _mm_sub_epi32(v[13], v[15]);
+ u[0] = _mm_add_epi32(v[0], v[8]);
+ u[1] = _mm_add_epi32(v[1], v[9]);
+ u[2] = _mm_add_epi32(v[2], v[10]);
+ u[3] = _mm_add_epi32(v[3], v[11]);
+ u[4] = _mm_add_epi32(v[4], v[12]);
+ u[5] = _mm_add_epi32(v[5], v[13]);
+ u[6] = _mm_add_epi32(v[6], v[14]);
+ u[7] = _mm_add_epi32(v[7], v[15]);
+ u[8] = _mm_sub_epi32(v[0], v[8]);
+ u[9] = _mm_sub_epi32(v[1], v[9]);
+ u[10] = _mm_sub_epi32(v[2], v[10]);
+ u[11] = _mm_sub_epi32(v[3], v[11]);
+ u[12] = _mm_sub_epi32(v[4], v[12]);
+ u[13] = _mm_sub_epi32(v[5], v[13]);
+ u[14] = _mm_sub_epi32(v[6], v[14]);
+ u[15] = _mm_sub_epi32(v[7], v[15]);
// stage 8
- v[0] = u[0];
- v[1] = u[1];
-
- y = _mm_mullo_epi32(u[2], cospi32);
- x = _mm_mullo_epi32(u[3], cospi32);
- v[2] = _mm_add_epi32(y, x);
- v[2] = _mm_add_epi32(v[2], rnding);
- v[2] = _mm_srai_epi32(v[2], bit);
-
- v[3] = _mm_sub_epi32(y, x);
- v[3] = _mm_add_epi32(v[3], rnding);
- v[3] = _mm_srai_epi32(v[3], bit);
-
- v[4] = u[4];
- v[5] = u[5];
-
- y = _mm_mullo_epi32(u[6], cospi32);
- x = _mm_mullo_epi32(u[7], cospi32);
- v[6] = _mm_add_epi32(y, x);
- v[6] = _mm_add_epi32(v[6], rnding);
- v[6] = _mm_srai_epi32(v[6], bit);
-
- v[7] = _mm_sub_epi32(y, x);
- v[7] = _mm_add_epi32(v[7], rnding);
- v[7] = _mm_srai_epi32(v[7], bit);
-
- v[8] = u[8];
- v[9] = u[9];
-
- y = _mm_mullo_epi32(u[10], cospi32);
- x = _mm_mullo_epi32(u[11], cospi32);
- v[10] = _mm_add_epi32(y, x);
- v[10] = _mm_add_epi32(v[10], rnding);
- v[10] = _mm_srai_epi32(v[10], bit);
-
- v[11] = _mm_sub_epi32(y, x);
- v[11] = _mm_add_epi32(v[11], rnding);
- v[11] = _mm_srai_epi32(v[11], bit);
-
- v[12] = u[12];
- v[13] = u[13];
-
- y = _mm_mullo_epi32(u[14], cospi32);
- x = _mm_mullo_epi32(u[15], cospi32);
- v[14] = _mm_add_epi32(y, x);
- v[14] = _mm_add_epi32(v[14], rnding);
- v[14] = _mm_srai_epi32(v[14], bit);
-
- v[15] = _mm_sub_epi32(y, x);
- v[15] = _mm_add_epi32(v[15], rnding);
- v[15] = _mm_srai_epi32(v[15], bit);
+ v[0] = half_btf_sse4_1(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
+ v[1] = half_btf_sse4_1(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
+ v[2] = half_btf_sse4_1(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
+ v[3] = half_btf_sse4_1(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
+ v[4] = half_btf_sse4_1(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
+ v[5] = half_btf_sse4_1(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
+ v[6] = half_btf_sse4_1(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
+ v[7] = half_btf_sse4_1(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
+ v[8] = half_btf_sse4_1(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
+ v[9] = half_btf_sse4_1(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
+ v[10] = half_btf_sse4_1(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
+ v[11] = half_btf_sse4_1(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
+ v[12] = half_btf_sse4_1(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);
// stage 9
- out[0 * col_num + col] = v[0];
- out[1 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[8]);
- out[2 * col_num + col] = v[12];
- out[3 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[4]);
- out[4 * col_num + col] = v[6];
- out[5 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[14]);
- out[6 * col_num + col] = v[10];
- out[7 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[2]);
- out[8 * col_num + col] = v[3];
- out[9 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[11]);
- out[10 * col_num + col] = v[15];
- out[11 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[7]);
- out[12 * col_num + col] = v[5];
- out[13 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[13]);
- out[14 * col_num + col] = v[9];
- out[15 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[1]);
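+ // stage 9 is a pure permutation; the sign flips live in stage 1.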
+ out[0 * 4 + col] = v[1];
+ out[1 * 4 + col] = v[14];
+ out[2 * 4 + col] = v[3];
+ out[3 * 4 + col] = v[12];
+ out[4 * 4 + col] = v[5];
+ out[5 * 4 + col] = v[10];
+ out[6 * 4 + col] = v[7];
+ out[7 * 4 + col] = v[8];
+ out[8 * 4 + col] = v[9];
+ out[9 * 4 + col] = v[6];
+ out[10 * 4 + col] = v[11];
+ out[11 * 4 + col] = v[4];
+ out[12 * 4 + col] = v[13];
+ out[13 * 4 + col] = v[2];
+ out[14 * 4 + col] = v[15];
+ out[15 * 4 + col] = v[0];
}
}
diff --git a/test/av1_fwd_txfm2d_test.cc b/test/av1_fwd_txfm2d_test.cc
index 3f568e9..b25e981 100644
--- a/test/av1_fwd_txfm2d_test.cc
+++ b/test/av1_fwd_txfm2d_test.cc
@@ -147,7 +147,7 @@
param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_4X8, 2.5, 0.4));
param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_8X4, 2.5, 0.4));
param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_8X16, 6.5, 1));
- param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_16X8, 6, 1));
+ param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_16X8, 6.5, 1));
param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_16X32, 46, 7));
param_list.push_back(AV1FwdTxfm2dParam(tx_type, TX_32X16, 30, 7));
diff --git a/test/av1_inv_txfm2d_test.cc b/test/av1_inv_txfm2d_test.cc
index 953ec68..ca52cbd 100644
--- a/test/av1_inv_txfm2d_test.cc
+++ b/test/av1_inv_txfm2d_test.cc
@@ -146,12 +146,12 @@
for (int t = 0; t <= FLIPADST_ADST; ++t) {
const TX_TYPE tx_type = static_cast<TX_TYPE>(t);
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_4X4, 2, 0.002));
- param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_8X8, 2, 0.02));
+ param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_8X8, 2, 0.025));
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_16X16, 2, 0.04));
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_32X32, 4, 0.4));
- param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_4X8, 2, 0.012));
- param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_8X4, 2, 0.012));
+ param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_4X8, 2, 0.016));
+ param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_8X4, 2, 0.016));
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_8X16, 2, 0.033));
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_16X8, 2, 0.033));
param_list.push_back(AV1InvTxfm2dParam(tx_type, TX_16X32, 2, 0.4));
diff --git a/test/transform_test_base.h b/test/transform_test_base.h
index 21441be..3db0c69 100644
--- a/test/transform_test_base.h
+++ b/test/transform_test_base.h
@@ -255,7 +255,7 @@
int row_length = FindRowLength();
// The minimum quant value is 4.
for (int j = 0; j < num_coeffs_; ++j) {
- EXPECT_EQ(output_block[j], output_ref_block[j])
+ ASSERT_EQ(output_block[j], output_ref_block[j])
<< "Not bit-exact at test index: " << i << ", "
<< "j = " << j << std::endl;
EXPECT_GE(row_length * kDctMaxValue << (bit_depth_ - 8),
@@ -323,7 +323,7 @@
const int diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
- EXPECT_GE(static_cast<uint32_t>(limit), error)
+ ASSERT_GE(static_cast<uint32_t>(limit), error)
<< "Error: 4x4 IDCT has error " << error << " at index " << j;
}
}