Merge "Non-normative quality improvements to CLPF." into nextgenv2
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 5162b6f..78fd1ef 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -171,14 +171,6 @@
   RECON_AND_STORE4X4(dest + 3 * stride, dc_value);
 }
 
-static INLINE void transpose_4x4(__m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
-  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
-
-  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
-  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
-}
-
 void idct4_sse2(__m128i *in) {
   const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
@@ -187,7 +179,7 @@
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   __m128i u[8], v[8];
 
-  transpose_4x4(in);
+  array_transpose_4x4(in);
   // stage 1
   u[0] = _mm_unpacklo_epi16(in[0], in[1]);
   u[1] = _mm_unpackhi_epi16(in[0], in[1]);
@@ -225,7 +217,7 @@
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   __m128i u[8], v[8], in7;
 
-  transpose_4x4(in);
+  array_transpose_4x4(in);
   in7 = _mm_srli_si128(in[1], 8);
   in7 = _mm_add_epi16(in7, in[0]);
   in7 = _mm_sub_epi16(in7, in[1]);
@@ -3518,7 +3510,7 @@
     test = _mm_movemask_epi8(temp_mm);
 
     if (test) {
-      transpose_4x4(inptr);
+      array_transpose_4x4(inptr);
       sign_bits[0] = _mm_cmplt_epi16(inptr[0], zero);
       sign_bits[1] = _mm_cmplt_epi16(inptr[1], zero);
       inptr[3] = _mm_unpackhi_epi16(inptr[1], sign_bits[1]);
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index 6a62508..f39b4d6 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -19,6 +19,16 @@
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
 // perform 8x8 transpose
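+// Transpose a 4x4 block of 16-bit values held as two rows per register
+// (res[0] = rows 0 and 1, res[1] = rows 2 and 3).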
+static INLINE void array_transpose_4x4(__m128i *res) {
+  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
+  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+
+  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
+  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
+}
+
 static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
   const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
   const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 6f1d462..be23948 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -91,10 +91,10 @@
 
     if (aom_config("CONFIG_EXT_TX") eq "yes") {
       add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht4x8_32_add/;
+      specialize qw/av1_iht4x8_32_add sse2/;
 
       add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht8x4_32_add/;
+      specialize qw/av1_iht8x4_32_add sse2/;
 
       add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
       specialize qw/av1_iht8x16_128_add sse2/;
@@ -152,10 +152,10 @@
 
     if (aom_config("CONFIG_EXT_TX") eq "yes") {
       add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht4x8_32_add/;
+      specialize qw/av1_iht4x8_32_add sse2/;
 
       add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht8x4_32_add/;
+      specialize qw/av1_iht8x4_32_add sse2/;
 
       add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
       specialize qw/av1_iht8x16_128_add sse2/;
@@ -395,10 +395,10 @@
 
 if (aom_config("CONFIG_EXT_TX") eq "yes") {
   add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht4x8/;
+  specialize qw/av1_fht4x8 sse2/;
 
   add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht8x4/;
+  specialize qw/av1_fht8x4 sse2/;
 
   add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
   specialize qw/av1_fht8x16 sse2/;
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 3b2221f..719b788 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -1257,11 +1257,12 @@
     const int need_right = !!(extend_modes[mode] & NEED_ABOVERIGHT);
 #endif  // CONFIG_EXT_INTRA
     if (n_top_px > 0) {
-      memcpy(above_row, above_ref, n_top_px * 2);
+      memcpy(above_row, above_ref, n_top_px * sizeof(above_ref[0]));
       i = n_top_px;
       if (need_right && n_topright_px > 0) {
         assert(n_top_px == bs);
-        memcpy(above_row + bs, above_ref + bs, n_topright_px * 2);
+        memcpy(above_row + bs, above_ref + bs,
+               n_topright_px * sizeof(above_ref[0]));
         i += n_topright_px;
       }
       if (i < (bs << need_right))
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 35d3b3b..e90be26 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -507,6 +507,28 @@
   in[7] = _mm_slli_epi16(in[7], 1);
 }
 
+static INLINE void iidtx4_sse2(__m128i *in) {
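+  // Inverse identity transform for a 4x4 block (two rows per register):
+  // scale each coefficient by Sqrt2, i.e.
+  // 'ROUND_POWER_OF_TWO(input * Sqrt2, DCT_CONST_BITS)'.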
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+}
+
 // load 8x8 array
 static INLINE void flip_buffer_lr_8x8(__m128i *in) {
   in[0] = mm_reverse_epi16(in[0]);
@@ -519,6 +538,39 @@
   in[7] = mm_reverse_epi16(in[7]);
 }
 
+static INLINE void scale_sqrt2_8x4(__m128i *in) {
+  // Implements 'ROUND_POWER_OF_TWO(input * Sqrt2, DCT_CONST_BITS)'
+  // for each element
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+  const __m128i v_p2l_w = _mm_mullo_epi16(in[2], v_scale_w);
+  const __m128i v_p2h_w = _mm_mulhi_epi16(in[2], v_scale_w);
+  const __m128i v_p3l_w = _mm_mullo_epi16(in[3], v_scale_w);
+  const __m128i v_p3h_w = _mm_mulhi_epi16(in[3], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p2a_d = _mm_unpacklo_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p2b_d = _mm_unpackhi_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p3a_d = _mm_unpacklo_epi16(v_p3l_w, v_p3h_w);
+  const __m128i v_p3b_d = _mm_unpackhi_epi16(v_p3l_w, v_p3h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+  in[2] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p2a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p2b_d, DCT_CONST_BITS));
+  in[3] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p3a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p3b_d, DCT_CONST_BITS));
+}
+
 static INLINE void scale_sqrt2_8x8(__m128i *in) {
   // Implements 'ROUND_POWER_OF_TWO_SIGNED(input * Sqrt2, DCT_CONST_BITS)'
   // for each element
@@ -835,4 +887,330 @@
     default: assert(0); break;
   }
 }
+
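+// Round (add 1 << 4, shift right by 5), add to the prediction in 'dest' and
+// store, for an 8x4 block held one row per register.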
+static INLINE void write_buffer_8x4_round5(uint8_t *dest, __m128i *in,
+                                           int stride) {
+  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+  const __m128i zero = _mm_setzero_si128();
+  // Final rounding and shift
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+
+  RECON_AND_STORE(dest + 0 * stride, in[0]);
+  RECON_AND_STORE(dest + 1 * stride, in[1]);
+  RECON_AND_STORE(dest + 2 * stride, in[2]);
+  RECON_AND_STORE(dest + 3 * stride, in[3]);
+}
+
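+// 8x4 inverse transform: an 8-point row transform is followed by 4-point
+// column transforms on the two 4x4 halves. scale_sqrt2_8x8() applies the
+// Sqrt2 scaling used for these rectangular transforms.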
+void av1_iht8x4_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
+  __m128i in[8];
+
+  in[0] = load_input_data(input + 0 * 8);
+  in[1] = load_input_data(input + 1 * 8);
+  in[2] = load_input_data(input + 2 * 8);
+  in[3] = load_input_data(input + 3 * 8);
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT: idct8_sse2(in); break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST: iadst8_sse2(in); break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX:
+      iidtx8_sse2(in);
+      array_transpose_8x8(in, in);
+      break;
+    default: assert(0); break;
+  }
+
+  scale_sqrt2_8x8(in);
+
+  // Repack data. We pack into the bottom half of 'in'
+  // so that the next repacking stage can pack into the
+  // top half without overwriting anything
+  in[7] = _mm_unpacklo_epi64(in[6], in[7]);
+  in[6] = _mm_unpacklo_epi64(in[4], in[5]);
+  in[5] = _mm_unpacklo_epi64(in[2], in[3]);
+  in[4] = _mm_unpacklo_epi64(in[0], in[1]);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT:
+      idct4_sse2(in + 4);
+      idct4_sse2(in + 6);
+      break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST:
+      iadst4_sse2(in + 4);
+      iadst4_sse2(in + 6);
+      break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX:
+      iidtx4_sse2(in + 4);
+      array_transpose_4x4(in + 4);
+      iidtx4_sse2(in + 6);
+      array_transpose_4x4(in + 6);
+      break;
+    default: assert(0); break;
+  }
+
+  // Repack data
+  in[0] = _mm_unpacklo_epi64(in[4], in[6]);
+  in[1] = _mm_unpackhi_epi64(in[4], in[6]);
+  in[2] = _mm_unpacklo_epi64(in[5], in[7]);
+  in[3] = _mm_unpackhi_epi64(in[5], in[7]);
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 4); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      in[0] = mm_reverse_epi16(in[0]);
+      in[1] = mm_reverse_epi16(in[1]);
+      in[2] = mm_reverse_epi16(in[2]);
+      in[3] = mm_reverse_epi16(in[3]);
+      break;
+    case FLIPADST_FLIPADST:
+      in[0] = mm_reverse_epi16(in[0]);
+      in[1] = mm_reverse_epi16(in[1]);
+      in[2] = mm_reverse_epi16(in[2]);
+      in[3] = mm_reverse_epi16(in[3]);
+      FLIPUD_PTR(dest, stride, 4);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_8x4_round5(dest, in, stride);
+}
+
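+// Round (add 1 << 4, shift right by 5), add to the prediction in 'dest' and
+// store, for a 4x8 block held two rows per register
+// (in[0] = rows 0-1, ..., in[3] = rows 6-7).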
+static INLINE void write_buffer_4x8_round5(uint8_t *dest, __m128i *in,
+                                           int stride) {
+  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+  const __m128i zero = _mm_setzero_si128();
+  // Final rounding and shift
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+
+  // Reconstruction and Store
+  {
+    __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 0));
+    __m128i d1 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 1));
+    __m128i d2 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 2));
+    __m128i d3 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
+    __m128i d4 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 4));
+    __m128i d5 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 5));
+    __m128i d6 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 6));
+    __m128i d7 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 7));
+
+    d0 = _mm_unpacklo_epi32(d0, d1);
+    d2 = _mm_unpacklo_epi32(d2, d3);
+    d4 = _mm_unpacklo_epi32(d4, d5);
+    d6 = _mm_unpacklo_epi32(d6, d7);
+    d0 = _mm_unpacklo_epi8(d0, zero);
+    d2 = _mm_unpacklo_epi8(d2, zero);
+    d4 = _mm_unpacklo_epi8(d4, zero);
+    d6 = _mm_unpacklo_epi8(d6, zero);
+    d0 = _mm_add_epi16(d0, in[0]);
+    d2 = _mm_add_epi16(d2, in[1]);
+    d4 = _mm_add_epi16(d4, in[2]);
+    d6 = _mm_add_epi16(d6, in[3]);
+
+    d0 = _mm_packus_epi16(d0, d2);
+    *(int *)dest = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_packus_epi16(d4, d6);
+    *(int *)(dest + stride * 4) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 5) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 6) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 7) = _mm_cvtsi128_si32(d0);
+  }
+}
+
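+// 4x8 inverse transform: 4-point row transforms on the two 4x4 halves are
+// followed by an 8-point column transform. scale_sqrt2_8x4() applies the
+// Sqrt2 scaling used for these rectangular transforms.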
+void av1_iht4x8_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
+  __m128i in[8];
+
+  // Load rows, packed two per element of 'in'.
+  // We pack into the bottom half of 'in' so that the
+  // later repacking stage can pack into the
+  // top half without overwriting anything
+  in[4] = load_input_data(input + 0 * 8);
+  in[5] = load_input_data(input + 1 * 8);
+  in[6] = load_input_data(input + 2 * 8);
+  in[7] = load_input_data(input + 3 * 8);
+
+  scale_sqrt2_8x4(in + 4);
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT:
+      idct4_sse2(in + 4);
+      idct4_sse2(in + 6);
+      break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST:
+      iadst4_sse2(in + 4);
+      iadst4_sse2(in + 6);
+      break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX:
+      iidtx4_sse2(in + 4);
+      array_transpose_4x4(in + 4);
+      iidtx4_sse2(in + 6);
+      array_transpose_4x4(in + 6);
+      break;
+    default: assert(0); break;
+  }
+
+  // Repack data
+  in[0] = _mm_unpacklo_epi64(in[4], in[6]);
+  in[1] = _mm_unpackhi_epi64(in[4], in[6]);
+  in[2] = _mm_unpacklo_epi64(in[5], in[7]);
+  in[3] = _mm_unpackhi_epi64(in[5], in[7]);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT: idct8_sse2(in); break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST: iadst8_sse2(in); break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX:
+      iidtx8_sse2(in);
+      array_transpose_8x8(in, in);
+      break;
+    default: assert(0); break;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 8); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+      in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+      in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+      in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+      in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+      in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+      in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+      in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+      break;
+    case FLIPADST_FLIPADST:
+      in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+      in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+      in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+      in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+      in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+      in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+      in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+      in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+      FLIPUD_PTR(dest, stride, 8);
+      break;
+    default: assert(0); break;
+  }
+  in[0] = _mm_unpacklo_epi64(in[0], in[1]);
+  in[1] = _mm_unpacklo_epi64(in[2], in[3]);
+  in[2] = _mm_unpacklo_epi64(in[4], in[5]);
+  in[3] = _mm_unpacklo_epi64(in[6], in[7]);
+  write_buffer_4x8_round5(dest, in, stride);
+}
 #endif  // CONFIG_EXT_TX
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index bea0fa2..c733131 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -2735,7 +2735,10 @@
   const FRAME_UPDATE_TYPE next_frame_update_type =
       gf_group->update_type[gf_group->index];
   const int which_arf = gf_group->arf_update_idx[gf_group->index];
-  if (cpi->rc.is_last_bipred_frame) {
+
+  if (cm->show_existing_frame == 1) {
+    cm->show_existing_frame = 0;
+  } else if (cpi->rc.is_last_bipred_frame) {
     // NOTE(zoeliu): If the current frame is a last bi-predictive frame, it is
     //               needed next to show the BWDREF_FRAME, which is pointed by
     //               the last_fb_idxes[0] after reference frame buffer update
@@ -2751,8 +2754,6 @@
     cpi->rc.is_src_frame_alt_ref = 1;
     cpi->existing_fb_idx_to_show = cpi->alt_fb_idx;
     cpi->is_arf_filter_off[which_arf] = 0;
-  } else {
-    cm->show_existing_frame = 0;
   }
   cpi->rc.is_src_frame_ext_arf = 0;
 }
@@ -3187,8 +3188,6 @@
              sizeof(cpi->interp_filter_selected[BWDREF_FRAME]));
     }
     cpi->bwd_fb_idx = tmp;
-#endif  // CONFIG_EXT_REFS
-#if CONFIG_EXT_REFS
   } else if (cpi->rc.is_src_frame_ext_arf && cm->show_existing_frame) {
     // Deal with the special case for showing existing internal ALTREF_FRAME
     // Refresh the LAST_FRAME with the ALTREF_FRAME and retire the LAST3_FRAME
@@ -3317,6 +3316,7 @@
     //      v                v                v
     // lst_fb_idxes[2], lst_fb_idxes[0], lst_fb_idxes[1]
     int ref_frame;
+
     if (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs) {
       // We have swapped the virtual indices to use ALT0 as BWD_REF
       // and we need to swap them back.
@@ -3325,6 +3325,7 @@
       cpi->alt_fb_idx = cpi->bwd_fb_idx;
       cpi->bwd_fb_idx = tmp;
     }
+
     if (cm->frame_type == KEY_FRAME) {
       for (ref_frame = 0; ref_frame < LAST_REF_FRAMES; ++ref_frame) {
         ref_cnt_fb(pool->frame_bufs,
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index 15dd66e..ad61fd3 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -2593,7 +2593,41 @@
 }
 
 #if CONFIG_EXT_TX
-static INLINE void scale_sqrt2_8x8(__m128i *in) {
+static INLINE void scale_sqrt2_8x4(__m128i *in) {
+  // Implements fdct_round_shift(input * Sqrt2), which is equivalent to
+  // ROUND_POWER_OF_TWO(input * Sqrt2, DCT_CONST_BITS),
+  // for each of the 32 elements held in in[0..3].
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+  const __m128i v_p2l_w = _mm_mullo_epi16(in[2], v_scale_w);
+  const __m128i v_p2h_w = _mm_mulhi_epi16(in[2], v_scale_w);
+  const __m128i v_p3l_w = _mm_mullo_epi16(in[3], v_scale_w);
+  const __m128i v_p3h_w = _mm_mulhi_epi16(in[3], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p2a_d = _mm_unpacklo_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p2b_d = _mm_unpackhi_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p3a_d = _mm_unpacklo_epi16(v_p3l_w, v_p3h_w);
+  const __m128i v_p3b_d = _mm_unpackhi_epi16(v_p3l_w, v_p3h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+  in[2] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p2a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p2b_d, DCT_CONST_BITS));
+  in[3] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p3a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p3b_d, DCT_CONST_BITS));
+}
+
+static INLINE void scale_sqrt2_8x8_signed(__m128i *in) {
   // Implements 'ROUND_POWER_OF_TWO_SIGNED(input * Sqrt2, DCT_CONST_BITS)'
   // for each element
   const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
@@ -2650,6 +2684,432 @@
                           xx_roundn_epi32(v_p7b_d, DCT_CONST_BITS));
 }
 
+// Load input into the left-hand half of in (i.e., into lanes 0..3 of
+// each element of in). The right-hand half (lanes 4..7) should be
+// treated as being filled with "don't care" values.
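+// Each sample is also shifted left by 3 and scaled by Sqrt2 (with
+// DCT_CONST_BITS rounding) before the transforms are applied.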
+static INLINE void load_buffer_4x8(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  if (!flipud) {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[4] = _mm_loadl_epi64((const __m128i *)(input + 4 * stride));
+    in[5] = _mm_loadl_epi64((const __m128i *)(input + 5 * stride));
+    in[6] = _mm_loadl_epi64((const __m128i *)(input + 6 * stride));
+    in[7] = _mm_loadl_epi64((const __m128i *)(input + 7 * stride));
+  } else {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 7 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 6 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 5 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 4 * stride));
+    in[4] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[5] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[6] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[7] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+    in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+    in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+    in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+    in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 3);
+  in[1] = _mm_slli_epi16(in[1], 3);
+  in[2] = _mm_slli_epi16(in[2], 3);
+  in[3] = _mm_slli_epi16(in[3], 3);
+  in[4] = _mm_slli_epi16(in[4], 3);
+  in[5] = _mm_slli_epi16(in[5], 3);
+  in[6] = _mm_slli_epi16(in[6], 3);
+  in[7] = _mm_slli_epi16(in[7], 3);
+
+  scale_sqrt2_8x4(in);
+  scale_sqrt2_8x4(in + 4);
+}
+
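+// Round each coefficient (add 1, shift right by 2) and store the 4x8 output,
+// packing the low halves of two registers per 128-bit store.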
+static INLINE void write_buffer_4x8(tran_low_t *output, __m128i *res) {
+  const __m128i kOne = _mm_set1_epi16(1);
+  __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
+  __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
+  __m128i in45 = _mm_unpacklo_epi64(res[4], res[5]);
+  __m128i in67 = _mm_unpacklo_epi64(res[6], res[7]);
+
+  __m128i out01 = _mm_add_epi16(in01, kOne);
+  __m128i out23 = _mm_add_epi16(in23, kOne);
+  __m128i out45 = _mm_add_epi16(in45, kOne);
+  __m128i out67 = _mm_add_epi16(in67, kOne);
+
+  out01 = _mm_srai_epi16(out01, 2);
+  out23 = _mm_srai_epi16(out23, 2);
+  out45 = _mm_srai_epi16(out45, 2);
+  out67 = _mm_srai_epi16(out67, 2);
+
+  store_output(&out01, (output + 0 * 8));
+  store_output(&out23, (output + 1 * 8));
+  store_output(&out45, (output + 2 * 8));
+  store_output(&out67, (output + 3 * 8));
+}
+
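+// 4x8 forward transform: the 8-point (column) transform is applied first;
+// the result is then repacked into two 4x4 blocks and the 4-point (row)
+// transforms are applied to each.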
+void av1_fht4x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  __m128i in[8];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      // Repack data into two 4x4 blocks so we can reuse the 4x4 transforms
+      // The other cases (and the 8x4 transforms) all behave similarly
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case ADST_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case DCT_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case ADST_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_4x8(input, in, stride, 1, 1);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case IDTX:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case V_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case V_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case V_FLIPADST:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+  write_buffer_4x8(output, in);
+}
+
+// Load input into the left-hand half of in (i.e., into lanes 0..3 of
+// each element of in). The right-hand half (lanes 4..7) should be
+// treated as being filled with "don't care" values.
+// The input is split horizontally into two 4x4
+// chunks 'l' and 'r'. Then 'l' is stored in the top-left 4x4
+// block of 'in' and 'r' is stored in the bottom-left block.
+// This allows us to reuse the 4x4 transforms.
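+// Each sample is also shifted left by 3 and scaled by Sqrt2 (with
+// DCT_CONST_BITS rounding) before the transforms are applied.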
+static INLINE void load_buffer_8x4(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  if (!flipud) {
+    in[0] = _mm_loadu_si128((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadu_si128((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadu_si128((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadu_si128((const __m128i *)(input + 3 * stride));
+  } else {
+    in[0] = _mm_loadu_si128((const __m128i *)(input + 3 * stride));
+    in[1] = _mm_loadu_si128((const __m128i *)(input + 2 * stride));
+    in[2] = _mm_loadu_si128((const __m128i *)(input + 1 * stride));
+    in[3] = _mm_loadu_si128((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = mm_reverse_epi16(in[0]);
+    in[1] = mm_reverse_epi16(in[1]);
+    in[2] = mm_reverse_epi16(in[2]);
+    in[3] = mm_reverse_epi16(in[3]);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 3);
+  in[1] = _mm_slli_epi16(in[1], 3);
+  in[2] = _mm_slli_epi16(in[2], 3);
+  in[3] = _mm_slli_epi16(in[3], 3);
+
+  scale_sqrt2_8x4(in);
+
+  in[4] = _mm_shuffle_epi32(in[0], 0xe);
+  in[5] = _mm_shuffle_epi32(in[1], 0xe);
+  in[6] = _mm_shuffle_epi32(in[2], 0xe);
+  in[7] = _mm_shuffle_epi32(in[3], 0xe);
+}
+
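+// Round each coefficient (add 1, shift right by 2) and store the 8x4 output,
+// one 8-wide row per 128-bit store.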
+static INLINE void write_buffer_8x4(tran_low_t *output, __m128i *res) {
+  const __m128i kOne = _mm_set1_epi16(1);
+
+  __m128i out0 = _mm_add_epi16(res[0], kOne);
+  __m128i out1 = _mm_add_epi16(res[1], kOne);
+  __m128i out2 = _mm_add_epi16(res[2], kOne);
+  __m128i out3 = _mm_add_epi16(res[3], kOne);
+  out0 = _mm_srai_epi16(out0, 2);
+  out1 = _mm_srai_epi16(out1, 2);
+  out2 = _mm_srai_epi16(out2, 2);
+  out3 = _mm_srai_epi16(out3, 2);
+
+  store_output(&out0, (output + 0 * 8));
+  store_output(&out1, (output + 1 * 8));
+  store_output(&out2, (output + 2 * 8));
+  store_output(&out3, (output + 3 * 8));
+}
+
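+// 8x4 forward transform: the 4-point (column) transforms are applied to the
+// two 4x4 halves first, then the 8-point (row) transform is applied.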
+void av1_fht8x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  __m128i in[8];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case ADST_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case DCT_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case ADST_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_8x4(input, in, stride, 1, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case IDTX:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case V_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case V_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case V_FLIPADST:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+  write_buffer_8x4(output, in);
+}
+
 static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in,
                                     int stride, int flipud, int fliplr) {
   // Load 2 8x8 blocks
@@ -2663,9 +3110,9 @@
   }
 
   load_buffer_8x8(t, in, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in);
+  scale_sqrt2_8x8_signed(in);
   load_buffer_8x8(b, in + 8, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in + 8);
+  scale_sqrt2_8x8_signed(in + 8);
 }
 
 void av1_fht8x16_sse2(const int16_t *input, tran_low_t *output, int stride,
@@ -2828,9 +3275,9 @@
 
   // load first 8 columns
   load_buffer_8x8(l, in, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in);
+  scale_sqrt2_8x8_signed(in);
   load_buffer_8x8(r, in + 8, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in + 8);
+  scale_sqrt2_8x8_signed(in + 8);
 }
 
 void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
diff --git a/test/av1_fht4x8_test.cc b/test/av1_fht4x8_test.cc
new file mode 100644
index 0000000..a344532
--- /dev/null
+++ b/test/av1_fht4x8_test.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x8Param;
+
+void fht4x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht4x8_c(in, out, stride, tx_type);
+}
+
+void iht4x8_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht4x8_32_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans4x8HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht4x8Param> {
+ public:
+  virtual ~AV1Trans4x8HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
+    fwd_txfm_ref = fht4x8_ref;
+    inv_txfm_ref = iht4x8_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans4x8HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans4x8HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht4x8Param kArrayHt4x8Param_sse2[] = {
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 0, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 1, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 2, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 3, AOM_BITS_8, 32),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 4, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 5, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 6, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 7, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 8, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 9, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 10, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 11, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 12, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 13, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 14, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 15, AOM_BITS_8, 32)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans4x8HT,
+                        ::testing::ValuesIn(kArrayHt4x8Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/av1_fht8x4_test.cc b/test/av1_fht8x4_test.cc
new file mode 100644
index 0000000..ee89e96
--- /dev/null
+++ b/test/av1_fht8x4_test.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x4Param;
+
+void fht8x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht8x4_c(in, out, stride, tx_type);
+}
+
+void iht8x4_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht8x4_32_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans8x4HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht8x4Param> {
+ public:
+  virtual ~AV1Trans8x4HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 8;
+    fwd_txfm_ref = fht8x4_ref;
+    inv_txfm_ref = iht8x4_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans8x4HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans8x4HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht8x4Param kArrayHt8x4Param_sse2[] = {
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 0, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 1, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 2, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 3, AOM_BITS_8, 32),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 4, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 5, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 6, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 7, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 8, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 9, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 10, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 11, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 12, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 13, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 14, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 15, AOM_BITS_8, 32)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x4HT,
+                        ::testing::ValuesIn(kArrayHt8x4Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 2610264..de1ae04 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -396,8 +396,8 @@
   void CopyOutputToRef() {
     memcpy(output_ref_, output_, kOutputBufferSize);
 #if CONFIG_AOM_HIGHBITDEPTH
-    memcpy(output16_ref_, output16_,
-           kOutputBufferSize * sizeof(output16_ref_[0]));
+    // Copy 16-bit pixel values. The effective number of bytes is double.
+    memcpy(output16_ref_, output16_, sizeof(output16_[0]) * kOutputBufferSize);
 #endif
   }
 
diff --git a/test/test.mk b/test/test.mk
index 73ef2c4..c071cea 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -138,6 +138,8 @@
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x16_test.cc
 ifeq ($(CONFIG_EXT_TX),yes)
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht4x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x4_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x16_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_iht8x16_test.cc