Revert "Reduce size of low bit depth sse2 sad functions"

This reverts commit ab0dcc819f3ff3b7f9289b911d1c75156d46199a.

Reason for revert:
This causes valgrind failures due to reads and writes below the
stack pointer on 32-bit x86:

[ RUN      ] SSE2/SADx4AvgTest.MaxRef/3
==2608034== Invalid write of size 2
==2608034==    at 0xC32AA4: aom_sad64x64x4d_avg_sse2 (in test_libaom)
==2608034==  Address 0xfebf9bf8 is on thread 1's stack
==2608034==  4 bytes below stack pointer
==2608034== 
==2608034== Invalid read of size 2
==2608034==    at 0xC32C01: aom_sad64x64x4d_avg_sse2 (in test_libaom)
==2608034==  Address 0xfebf9bf8 is on thread 1's stack
==2608034==  4 bytes below stack pointer

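The reported addresses match the loop-counter spill that the
reverted commit introduced for the 32-bit builds, where
"%define cntd word [rsp-4]" kept the counter in the word 4 bytes
below the stack pointer (removed again by the hunks below). A
minimal sketch of the failure mode, assuming NASM syntax and a
32-bit target where x86inc maps rsp to esp; the instruction
sequences are illustrative, not code from the patch:

  ; Unsafe: the 32-bit ABI has no red zone, so memory below the
  ; stack pointer may be clobbered asynchronously (e.g. by a
  ; signal handler); valgrind flags every access to it.
  mov     word [rsp-4], 32   ; invalid write of size 2, 4 bytes below sp
  sub     word [rsp-4], 1    ; invalid read (and write) of size 2
  ; Safe: reserve the slot first, release it afterwards.
  sub     rsp, 4
  mov     word [rsp], 32
  sub     word [rsp], 1
  add     rsp, 4

The failure reproduces in isolation by running test_libaom under
valgrind with --gtest_filter=SSE2/SADx4AvgTest.MaxRef/3.
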
Change-Id: I792383ae4a9b5f171ed341058c99b3719a53bd90
diff --git a/aom_dsp/x86/sad4d_sse2.asm b/aom_dsp/x86/sad4d_sse2.asm
index df7e375..9ab44c1 100644
--- a/aom_dsp/x86/sad4d_sse2.asm
+++ b/aom_dsp/x86/sad4d_sse2.asm
@@ -22,95 +22,114 @@
   pavgb                 %2, m2
   lea                   second_predq, [second_predq+8]
 %endmacro
-; 'spill_src_stride' affect a lot how the code works.
+; 'mflag' strongly affects how the code works.
 ;
-; When 'spill_src_stride' is false, the 'src_strideq' resides in
-; register, [srcq + src_strideq + offset] is allowed, so we can simply
-; use such form to access src memory and don't bother to update 'srcq'
-; at each line. We only update 'srcq' each two-lines using a compact
-; LEA instruction like [srcq+src_strideq*2].
+; When 'mflag' is false, 'src_strideq' resides in a register, so
+; the form [srcq + src_strideq + offset] is legal; we can simply
+; use it to access src memory and need not update 'srcq' at every
+; line. We update 'srcq' only once per two lines, using a compact
+; LEA instruction like [srcq+src_strideq*2].
 ;
-; When 'spill_src_stride' is true, the 'src_strideq' resides in memory.
+; When 'mflag' is true, 'src_strideq' resides in memory, so
 ; the above form cannot be used and we have to update 'srcq'
 ; explicitly at each line ending. As each macro invocation processes
 ; two parts (first, second) together, the second part may sit on
 ; the next line, which means we may also need to add one
 ; 'src_strideq' to 'srcq' before processing the second part.
 
-%macro HANDLE_SECOND_OFFSET 0
-  %if spill_src_stride
-    %define second_offset 0
-    add srcq, src_strideq
-  %else
-    %define second_offset (src_strideq)
+%macro HANDLE_FIRST_OFFSET 2
+  %define first_offset %2
+  %if mflag == 0 && %1 == 1
+    %define first_offset (src_strideq + %2)
   %endif
 %endmacro
 
+; first_extraline, second_extraline, in_line_offset
+%macro HANDLE_SECOND_OFFSET 3
+  %define second_offset %3
+  %if mflag && %1 == 0 && %2 == 1
+    add srcq, src_strideq
+  %endif
+  %if mflag == 0 && %2 == 1
+    %define second_offset (src_strideq + %3)
+  %endif
+%endmacro
+
+; Notes for line_ending:
+; 0 -- not a line ending
+; 1 -- line ending of an odd line [line numbers start from one]
+; 2 -- line ending of an even line
 ; This is specifically designed to handle the case where src_strideq
 ; is a memory location; in that case we cannot perform complex
 ; address calculation using LEA, and fall back to using a simple
 ; ADD instruction at each line ending.
-%macro ADVANCE_END_OF_TWO_LINES 0
-  %if spill_src_stride
+%macro ADVANCE_END_OF_LINE 1
+  %if mflag
     add srcq, src_strideq
-  %else
-    lea                 srcq, [srcq+src_strideq*2]
+  %endif
+  %if mflag == 0 && %1 == 2
+    lea                 srcq, [srcq+src_strideq*2]
   %endif
 
-; note: ref_stride is never spilled when processing two lines
-  lea                ref1q, [ref1q+ref_strideq*2]
-  lea                ref2q, [ref2q+ref_strideq*2]
-  lea                ref3q, [ref3q+ref_strideq*2]
-  lea                ref4q, [ref4q+ref_strideq*2]
+  %if %1 == 2
+    lea                ref1q, [ref1q+ref_strideq*2]
+    lea                ref2q, [ref2q+ref_strideq*2]
+    lea                ref3q, [ref3q+ref_strideq*2]
+    lea                ref4q, [ref4q+ref_strideq*2]
+  %endif
 %endmacro
 
-; PROCESS_4x2x4 first, do_avg
-%macro PROCESS_4x2x4 2
-  movd                  m0, [srcq]
-  HANDLE_SECOND_OFFSET
+; Note that the second_offset of src is the in_line_offset (an
+; offset within the line), so it is less than src_stride.
+; PROCESS_4x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;               {first,second}_extraline, line_ending
+%macro PROCESS_4x2x4 9
+  HANDLE_FIRST_OFFSET   %7, %2
+  movd                  m0, [srcq + first_offset]
+  HANDLE_SECOND_OFFSET  %7, %8, %4
 %if %1 == 1
-  movd                  m6, [ref1q]
-  movd                  m4, [ref2q]
-  movd                  m7, [ref3q]
-  movd                  m5, [ref4q]
+  movd                  m6, [ref1q+%3]
+  movd                  m4, [ref2q+%3]
+  movd                  m7, [ref3q+%3]
+  movd                  m5, [ref4q+%3]
 
   movd                  m1, [srcq + second_offset]
-  movd                  m2, [ref1q+ref_strideq]
+  movd                  m2, [ref1q+%5]
   punpckldq             m0, m1
   punpckldq             m6, m2
-  movd                  m1, [ref2q+ref_strideq]
-  movd                  m2, [ref3q+ref_strideq]
-  movd                  m3, [ref4q+ref_strideq]
+  movd                  m1, [ref2q+%5]
+  movd                  m2, [ref3q+%5]
+  movd                  m3, [ref4q+%5]
   punpckldq             m4, m1
   punpckldq             m7, m2
   punpckldq             m5, m3
   movlhps               m0, m0
   movlhps               m6, m4
   movlhps               m7, m5
-%if %2 == 1
+%if %6 == 1
   AVG_4x2x4             m6, m7
 %endif
   psadbw                m6, m0
   psadbw                m7, m0
 %else
-  movd                  m1, [ref1q]
-  movd                  m5, [ref1q+ref_strideq]
-  movd                  m2, [ref2q]
-  movd                  m4, [ref2q+ref_strideq]
+  movd                  m1, [ref1q+%3]
+  movd                  m5, [ref1q+%5]
+  movd                  m2, [ref2q+%3]
+  movd                  m4, [ref2q+%5]
   punpckldq             m1, m5
   punpckldq             m2, m4
-  movd                  m3, [ref3q]
-  movd                  m5, [ref3q+ref_strideq]
+  movd                  m3, [ref3q+%3]
+  movd                  m5, [ref3q+%5]
   punpckldq             m3, m5
-  movd                  m4, [ref4q]
-  movd                  m5, [ref4q+ref_strideq]
+  movd                  m4, [ref4q+%3]
+  movd                  m5, [ref4q+%5]
   punpckldq             m4, m5
   movd                  m5, [srcq + second_offset]
   punpckldq             m0, m5
   movlhps               m0, m0
   movlhps               m1, m2
   movlhps               m3, m4
-%if %2 == 1
+%if %6 == 1
   AVG_4x2x4             m1, m3
 %endif
   psadbw                m1, m0
@@ -118,23 +137,28 @@
   paddd                 m6, m1
   paddd                 m7, m3
 %endif
+%if %9 > 0
+  ADVANCE_END_OF_LINE %9
+%endif
 %endmacro
 
-; PROCESS_8x2x4 first, do_avg
-%macro PROCESS_8x2x4 2
-  movh                  m0, [srcq]
-  HANDLE_SECOND_OFFSET
+; PROCESS_8x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;               {first,second}_extraline, line_ending
+%macro PROCESS_8x2x4 9
+  HANDLE_FIRST_OFFSET   %7, %2
+  movh                  m0, [srcq + first_offset]
+  HANDLE_SECOND_OFFSET  %7, %8, %4
 %if %1 == 1
-  movh                  m4, [ref1q]
-  movh                  m5, [ref2q]
-  movh                  m6, [ref3q]
-  movh                  m7, [ref4q]
+  movh                  m4, [ref1q+%3]
+  movh                  m5, [ref2q+%3]
+  movh                  m6, [ref3q+%3]
+  movh                  m7, [ref4q+%3]
   movhps                m0, [srcq + second_offset]
-  movhps                m4, [ref1q+ref_strideq]
-  movhps                m5, [ref2q+ref_strideq]
-  movhps                m6, [ref3q+ref_strideq]
-  movhps                m7, [ref4q+ref_strideq]
-%if %2 == 1
+  movhps                m4, [ref1q+%5]
+  movhps                m5, [ref2q+%5]
+  movhps                m6, [ref3q+%5]
+  movhps                m7, [ref4q+%5]
+%if %6 == 1
   movu                  m3, [second_predq]
   pavgb                 m4, m3
   pavgb                 m5, m3
@@ -147,12 +171,12 @@
   psadbw                m6, m0
   psadbw                m7, m0
 %else
-  movh                  m1, [ref1q]
-  movh                  m2, [ref2q]
+  movh                  m1, [ref1q+%3]
+  movh                  m2, [ref2q+%3]
   movhps                m0, [srcq + second_offset]
-  movhps                m1, [ref1q+ref_strideq]
-  movhps                m2, [ref2q+ref_strideq]
-%if %2 == 1
+  movhps                m1, [ref1q+%5]
+  movhps                m2, [ref2q+%5]
+%if %6 == 1
   movu                  m3, [second_predq]
   pavgb                 m1, m3
   pavgb                 m2, m3
@@ -162,11 +186,11 @@
   paddd                 m4, m1
   paddd                 m5, m2
 
-  movh                  m1, [ref3q]
-  movhps                m1, [ref3q+ref_strideq]
-  movh                  m2, [ref4q]
-  movhps                m2, [ref4q+ref_strideq]
-%if %2 == 1
+  movh                  m1, [ref3q+%3]
+  movhps                m1, [ref3q+%5]
+  movh                  m2, [ref4q+%3]
+  movhps                m2, [ref4q+%5]
+%if %6 == 1
   pavgb                 m1, m3
   pavgb                 m2, m3
   lea                   second_predq, [second_predq+mmsize]
@@ -176,16 +200,24 @@
   paddd                 m6, m1
   paddd                 m7, m2
 %endif
+%if %9 > 0
+  ADVANCE_END_OF_LINE %9
+%endif
 %endmacro
 
-; PROCESS_FIRST_MMSIZE do_avg
-%macro PROCESS_FIRST_MMSIZE 1
-  mova                  m0, [srcq]
-  movu                  m4, [ref1q]
-  movu                  m5, [ref2q]
-  movu                  m6, [ref3q]
-  movu                  m7, [ref4q]
+; PROCESS_16x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;                {first,second}_extraline, line_ending
+%macro PROCESS_16x2x4 9
+  ; 1st 16 px
+  HANDLE_FIRST_OFFSET   %7, %2
+  mova                  m0, [srcq + first_offset]
+  HANDLE_SECOND_OFFSET  %7, %8, %4
 %if %1 == 1
+  movu                  m4, [ref1q+%3]
+  movu                  m5, [ref2q+%3]
+  movu                  m6, [ref3q+%3]
+  movu                  m7, [ref4q+%3]
+%if %6 == 1
   movu                  m3, [second_predq]
   pavgb                 m4, m3
   pavgb                 m5, m3
@@ -197,14 +229,10 @@
   psadbw                m5, m0
   psadbw                m6, m0
   psadbw                m7, m0
-%endmacro
-
-; PROCESS_16x1x4 offset, do_avg
-%macro PROCESS_16x1x4 2
-  mova                  m0, [srcq + %1]
-  movu                  m1, [ref1q + ref_offsetq + %1]
-  movu                  m2, [ref2q + ref_offsetq + %1]
-%if %2 == 1
+%else ; %1 == 1
+  movu                  m1, [ref1q+%3]
+  movu                  m2, [ref2q+%3]
+%if %6 == 1
   movu                  m3, [second_predq]
   pavgb                 m1, m3
   pavgb                 m2, m3
@@ -214,9 +242,9 @@
   paddd                 m4, m1
   paddd                 m5, m2
 
-  movu                  m1, [ref3q + ref_offsetq + %1]
-  movu                  m2, [ref4q + ref_offsetq + %1]
-%if %2 == 1
+  movu                  m1, [ref3q+%3]
+  movu                  m2, [ref4q+%3]
+%if %6 == 1
   pavgb                 m1, m3
   pavgb                 m2, m3
   lea                   second_predq, [second_predq+mmsize]
@@ -225,6 +253,60 @@
   psadbw                m2, m0
   paddd                 m6, m1
   paddd                 m7, m2
+%endif ; %1 == 1
+
+  ; 2nd 16 px
+  mova                  m0, [srcq + second_offset]
+  movu                  m1, [ref1q+%5]
+  movu                  m2, [ref2q+%5]
+
+%if %6 == 1
+  movu                  m3, [second_predq]
+  pavgb                 m1, m3
+  pavgb                 m2, m3
+%endif
+  psadbw                m1, m0
+  psadbw                m2, m0
+  paddd                 m4, m1
+  paddd                 m5, m2
+
+  movu                  m1, [ref3q+%5]
+  movu                  m2, [ref4q+%5]
+
+%if %9 > 0
+  ADVANCE_END_OF_LINE %9
+%endif
+
+%if %6 == 1
+  pavgb                 m1, m3
+  pavgb                 m2, m3
+  lea                   second_predq, [second_predq+mmsize]
+%endif
+  psadbw                m1, m0
+  psadbw                m2, m0
+  paddd                 m6, m1
+  paddd                 m7, m2
+%endmacro
+
+; PROCESS_32x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;                {first,second}_extraline, line_ending
+%macro PROCESS_32x2x4 9
+  PROCESS_16x2x4 %1, %2, %3, %2 + 16, %3 + 16, %6, %7, %7, %8 - %7
+  PROCESS_16x2x4  0, %4, %5, %4 + 16, %5 + 16, %6, %8, %8, %9
+%endmacro
+
+; PROCESS_64x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;                {first,second}_extraline, line_ending
+%macro PROCESS_64x2x4 9
+  PROCESS_32x2x4 %1, %2, %3, %2 + 32, %3 + 32, %6, %7, %7, %8 - %7
+  PROCESS_32x2x4  0, %4, %5, %4 + 32, %5 + 32, %6, %8, %8, %9
+%endmacro
+
+; PROCESS_128x2x4 first, off_{first,second}_{src,ref}, do_avg,
+;                 {first,second}_extraline, line_ending
+%macro PROCESS_128x2x4 9
+  PROCESS_64x2x4 %1, %2, %3, %2 + 64, %3 + 64, %6, %7, %7, %8 - %7
+  PROCESS_64x2x4  0, %4, %5, %4 + 64, %5 + 64, %6, %8, %8, %9
 %endmacro
 
 ; void aom_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
@@ -236,116 +318,38 @@
 ;   3: If 0, then normal sad, else avg
 ;   4: If 0, then normal sad, else skip rows
 %macro SADNXN4D 2-4 0,0
-
-%define spill_src_stride 0
-%define spill_ref_stride 0
-%define spill_cnt 0
-
-; Whether a shared offset should be used instead of adding strides to
-; each reference array. With this option, only one line will be processed
-; per loop iteration.
-%define use_ref_offset (%1 >= mmsize)
-
-; Remove loops in the 4x4 and 8x4 case
-%define use_loop (use_ref_offset || %2 > 4)
-
 %if %4 == 1  ; skip rows
 %if ARCH_X86_64
-%if use_ref_offset
-cglobal sad_skip_%1x%2x4d, 5, 10, 8, src, src_stride, ref1, ref_stride, res, \
-                                     ref2, ref3, ref4, cnt, ref_offset
-%elif use_loop
-cglobal sad_skip_%1x%2x4d, 5, 9, 8, src, src_stride, ref1, ref_stride, res, \
-                                    ref2, ref3, ref4, cnt
+cglobal sad_skip_%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
+                              res, ref2, ref3, ref4
 %else
-cglobal sad_skip_%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, res, \
-                                    ref2, ref3, ref4
-%endif
-%else
-%if use_ref_offset
-cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, ref_offset, ref1, cnt, ref2, ref3, \
-                                    ref4
-%define spill_src_stride 1
-%define spill_ref_stride 1
-%elif use_loop
-cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, cnt, ref1, ref_stride, ref2, \
-                                    ref3, ref4
-%define spill_src_stride 1
-%else
-cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, ref2, \
-                                    ref3, ref4
-%endif
+cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
+                              ref2, ref3, ref4
 %endif
 %elif %3 == 0  ; normal sad
 %if ARCH_X86_64
-%if use_ref_offset
-cglobal sad%1x%2x4d, 5, 10, 8, src, src_stride, ref1, ref_stride, res, ref2, \
-                               ref3, ref4, cnt, ref_offset
-%elif use_loop
-cglobal sad%1x%2x4d, 5, 9, 8, src, src_stride, ref1, ref_stride, res, ref2, \
-                              ref3, ref4, cnt
+cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
+                              res, ref2, ref3, ref4
 %else
-cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, res, ref2, \
-                              ref3, ref4
-%endif
-%else
-%if use_ref_offset
-cglobal sad%1x%2x4d, 4, 7, 8, src, ref_offset, ref1, cnt, ref2, ref3, ref4
-  %define spill_src_stride 1
-  %define spill_ref_stride 1
-%elif use_loop
-cglobal sad%1x%2x4d, 4, 7, 8, src, cnt, ref1, ref_stride, ref2, ref3, ref4
-  %define spill_src_stride 1
-%else
-cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, ref2, ref3, \
-                              ref4
-%endif
+cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
+                              ref2, ref3, ref4
 %endif
 %else ; avg
 %if ARCH_X86_64
-%if use_ref_offset
-cglobal sad%1x%2x4d_avg, 6, 11, 8, src, src_stride, ref1, ref_stride, \
-                                   second_pred, res, ref2, ref3, ref4, cnt, \
-                                   ref_offset
-%elif use_loop
 cglobal sad%1x%2x4d_avg, 6, 10, 8, src, src_stride, ref1, ref_stride, \
-                                   second_pred, res, ref2, ref3, ref4, cnt
+                                  second_pred, res, ref2, ref3, ref4
 %else
-cglobal sad%1x%2x4d_avg, 6, 9, 8, src, src_stride, ref1, ref_stride, \
-                                   second_pred, res, ref2, ref3, ref4
-%endif
-%else
-%if use_ref_offset
-cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_offset, second_pred, ref2, ref3
-  %define spill_src_stride 1
-  %define spill_ref_stride 1
-  %define spill_cnt 1
-%elif use_loop
-cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, second_pred, ref2, ref3
-  %define spill_src_stride 1
-  %define spill_cnt 1
-%else
-cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, second_pred, ref2, ref3
-  %define spill_src_stride 1
-%endif
-%endif
-%endif
-
-%if spill_src_stride
+cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, \
+                                  second_pred, ref2, ref3
   %define src_strideq r1mp
   %define src_strided r1mp
 %endif
-%if spill_ref_stride
-  %define ref_strideq r3mp
-  %define ref_strided r3mp
-%endif
-%if spill_cnt
-  %define cntd word [rsp-4]
 %endif
 
+  %define mflag ((1 - ARCH_X86_64) & %3)
 %if %4 == 1
-  sal          src_strided, 1
-  sal          ref_strided, 1
+  lea          src_strided, [2*src_strided]
+  lea          ref_strided, [2*ref_strided]
 %endif
   movsxdifnidn src_strideq, src_strided
   movsxdifnidn ref_strideq, ref_strided
@@ -355,62 +359,18 @@
   mov                ref4q, [ref1q+gprsize*3]
   mov                ref1q, [ref1q+gprsize*0]
 
-; Is the loop for this wxh in another function?
-; If so, we jump into that function for the loop and returning
-%define external_loop (use_ref_offset && %1 > mmsize && %1 != %2)
-
-%if use_ref_offset
-  PROCESS_FIRST_MMSIZE %3
-%if %1 > mmsize
-  mov          ref_offsetq, 0
-  mov                 cntd, %2 >> %4
-; Jump part way into the loop for the square version of this width
-%if %3 == 1
-  jmp mangle(private_prefix %+ _sad%1x%1x4d_avg %+ SUFFIX).midloop
-%elif %4 == 1
-  jmp mangle(private_prefix %+ _sad_skip_%1x%1x4d %+ SUFFIX).midloop
+  PROCESS_%1x2x4 1, 0, 0, 0, ref_strideq, %3, 0, 1, 2
+%if %4 == 1  ; downsample number of rows by 2
+%define num_rep (%2-8)/4
 %else
-  jmp mangle(private_prefix %+ _sad%1x%1x4d %+ SUFFIX).midloop
+%define num_rep (%2-4)/2
 %endif
-%else
-  mov          ref_offsetq, ref_strideq
-  add                 srcq, src_strideq
-  mov                 cntd, (%2 >> %4) - 1
-%endif
-%if external_loop == 0
-.loop:
-; Unrolled horizontal loop
-%assign h_offset 0
-%rep %1/mmsize
-  PROCESS_16x1x4 h_offset, %3
-%if h_offset == 0
-; The first row of the first column is done outside the loop and jumps here
-.midloop:
-%endif
-%assign h_offset h_offset+mmsize
+%rep num_rep
+  PROCESS_%1x2x4 0, 0, 0, 0, ref_strideq, %3, 0, 1, 2
 %endrep
+%undef num_rep
+  PROCESS_%1x2x4 0, 0, 0, 0, ref_strideq, %3, 0, 1, 2
 
-  add                 srcq, src_strideq
-  add          ref_offsetq, ref_strideq
-  sub                 cntd, 1
-  jnz .loop
-%endif
-%else
-  PROCESS_%1x2x4 1, %3
-  ADVANCE_END_OF_TWO_LINES
-%if use_loop
-  mov                 cntd, (%2/2 >> %4) - 1
-.loop:
-%endif
-  PROCESS_%1x2x4 0, %3
-%if use_loop
-  ADVANCE_END_OF_TWO_LINES
-  sub                 cntd, 1
-  jnz .loop
-%endif
-%endif
-
-%if external_loop == 0
 %if %3 == 0
   %define resultq r4
   %define resultmp r4mp
@@ -419,16 +379,6 @@
   %define resultmp r5mp
 %endif
 
-; Undo modifications on parameters on the stack
-%if %4 == 1
-%if spill_src_stride
-  shr          src_strided, 1
-%endif
-%if spill_ref_stride
-  shr          ref_strided, 1
-%endif
-%endif
-
 %if %1 > 4
   pslldq                m5, 4
   pslldq                m7, 4
@@ -457,7 +407,6 @@
   movq              [resultq+8], m7
   RET
 %endif
-%endif ; external_loop == 0
 %endmacro
 
 INIT_XMM sse2
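
As background for the restored 'mflag' comment near the top of the
diff, a minimal sketch of the two stride-addressing strategies,
assuming NASM/x86inc syntax with srcq/src_strideq named as in this
file; the instruction sequences are illustrative, not code from the
patch:

  ; mflag == 0: the stride lives in a register, so base+index
  ; addressing is legal and two lines advance with a single LEA.
  movd    m0, [srcq]                    ; line 0
  movd    m1, [srcq + src_strideq]      ; line 1, register index
  lea     srcq, [srcq + src_strideq*2]  ; advance two lines at once

  ; mflag == 1: the stride was spilled to a stack slot (r1mp), which
  ; cannot appear inside an effective address, so every line ending
  ; needs an explicit ADD with the memory operand.
  %define src_strideq r1mp
  movd    m0, [srcq]
  add     srcq, src_strideq             ; ADD reg, mem is legal
  movd    m1, [srcq]
  add     srcq, src_strideq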