Revert "Revert "Reduce size of low bit depth sse2 sad functions""
This reverts commit e34c3fcfdb611df99a54d3c764b3a45202941066.
Original commit message:
Reduce size of low bit depth sse2 sad functions
-508 KiB for full binaries
-246 KiB for realtime only mode
Neutral performance impact.
A fix is added to correct 32-bit mode:
subtract from the stack pointer instead of accessing directly from an
offset (line 343), and restore the original stack pointer on line 417.
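
The fix corresponds to the spill_cnt handling visible in the diff
below, sketched here with the surrounding context trimmed:

    SUB rsp, 4              ; allocate a scratch slot for cnt
    %define cntd word [rsp] ; cnt now lives at the new stack top
    ...
    ADD rsp, 4              ; restore the original stack pointer
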
Change-Id: I35c5ef6a7067358324421c6c9bd4c3a9fd633c05
diff --git a/aom_dsp/x86/sad4d_sse2.asm b/aom_dsp/x86/sad4d_sse2.asm
index 9ab44c1..6de708b 100644
--- a/aom_dsp/x86/sad4d_sse2.asm
+++ b/aom_dsp/x86/sad4d_sse2.asm
@@ -22,114 +22,95 @@
pavgb %2, m2
lea second_predq, [second_predq+8]
%endmacro
-; 'mflag' affect a lot how the code works.
+; 'spill_src_stride' greatly affects how the code works.
;
-; When 'mflag' is false, the 'src_strideq' resides in register,
-; [srcq + src_strideq + offset] is allowed, so we can simply
-; use such form to access src memory and don't bother to update
-; 'srcq' at each line. We only update 'srcq' each two-lines using
-; a compact LEA instruction like [srcq+src_strideq*2].
+; When 'spill_src_stride' is false, 'src_strideq' resides in a
+; register and [srcq + src_strideq + offset] is allowed, so we can
+; simply use that form to access src memory and need not update
+; 'srcq' at each line. We only update 'srcq' every two lines, using a
+; compact LEA instruction like [srcq+src_strideq*2].
;
-; When 'mflag' is true, the 'src_strideq' resides in memory.
+; When 'spill_src_stride' is true, 'src_strideq' resides in memory;
; we cannot use above form to access memory, we have to update
; 'srcq' at each line break. As we process two parts (first,second)
; together in each macro function, the second part may also sit
; in the next line, which means we also need to possibly add
; one 'src_strideq' to 'srcq' before processing second part.
-%macro HANDLE_FIRST_OFFSET 2
- %define first_offset %2
- %if mflag == 0 && %1 == 1
- %define first_offset (src_strideq + %2)
- %endif
-%endmacro
-
-; first_extraline, second_extraline, in_line_offset
-%macro HANDLE_SECOND_OFFSET 3
- %define second_offset %3
- %if mflag && %1 == 0 && %2 == 1
+%macro HANDLE_SECOND_OFFSET 0
+ %if spill_src_stride
+ %define second_offset 0
add srcq, src_strideq
- %endif
- %if mflag == 0 && %2 == 1
- %define second_offset (src_strideq + %3)
+ %else
+ %define second_offset (src_strideq)
%endif
%endmacro
-; Notes for line_ending:
-; 0 -- not a line ending
-; 1 -- line ending of a odd line [line numbers starts from one]
-; 2 -- line ending of a even line
; This is specically designed to handle when src_strideq is a
; memory position, under such case, we can not accomplish
; complex address calculation using LEA, and fall back to
; using simple ADD instruction at each line ending.
-%macro ADVANCE_END_OF_LINE 1
- %if mflag
+%macro ADVANCE_END_OF_TWO_LINES 0
+ %if spill_src_stride
add srcq, src_strideq
- %endif
- %if mflag == 0 && %1 == 2
- lea srcq, [srcq +src_strideq*2]
+ %else
+ lea srcq, [srcq+src_strideq*2]
%endif
- %if %1 == 2
- lea ref1q, [ref1q+ref_strideq*2]
- lea ref2q, [ref2q+ref_strideq*2]
- lea ref3q, [ref3q+ref_strideq*2]
- lea ref4q, [ref4q+ref_strideq*2]
- %endif
+; note: ref_stride is never spilled when processing two lines
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
%endmacro
-; Please note that the second_offset of src is for in_line_offset,
-; so it is less than src_stride.
-; PROCESS_4x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first, second}_extraline, line_ending
-%macro PROCESS_4x2x4 9
- HANDLE_FIRST_OFFSET %7, %2
- movd m0, [srcq + first_offset]
- HANDLE_SECOND_OFFSET %7, %8, %4
+; PROCESS_4x2x4 first, do_avg
+%macro PROCESS_4x2x4 2
+ movd m0, [srcq]
+ HANDLE_SECOND_OFFSET
%if %1 == 1
- movd m6, [ref1q+%3]
- movd m4, [ref2q+%3]
- movd m7, [ref3q+%3]
- movd m5, [ref4q+%3]
+ movd m6, [ref1q]
+ movd m4, [ref2q]
+ movd m7, [ref3q]
+ movd m5, [ref4q]
movd m1, [srcq + second_offset]
- movd m2, [ref1q+%5]
+ movd m2, [ref1q+ref_strideq]
punpckldq m0, m1
punpckldq m6, m2
- movd m1, [ref2q+%5]
- movd m2, [ref3q+%5]
- movd m3, [ref4q+%5]
+ movd m1, [ref2q+ref_strideq]
+ movd m2, [ref3q+ref_strideq]
+ movd m3, [ref4q+ref_strideq]
punpckldq m4, m1
punpckldq m7, m2
punpckldq m5, m3
movlhps m0, m0
movlhps m6, m4
movlhps m7, m5
-%if %6 == 1
+%if %2 == 1
AVG_4x2x4 m6, m7
%endif
psadbw m6, m0
psadbw m7, m0
%else
- movd m1, [ref1q+%3]
- movd m5, [ref1q+%5]
- movd m2, [ref2q+%3]
- movd m4, [ref2q+%5]
+ movd m1, [ref1q]
+ movd m5, [ref1q+ref_strideq]
+ movd m2, [ref2q]
+ movd m4, [ref2q+ref_strideq]
punpckldq m1, m5
punpckldq m2, m4
- movd m3, [ref3q+%3]
- movd m5, [ref3q+%5]
+ movd m3, [ref3q]
+ movd m5, [ref3q+ref_strideq]
punpckldq m3, m5
- movd m4, [ref4q+%3]
- movd m5, [ref4q+%5]
+ movd m4, [ref4q]
+ movd m5, [ref4q+ref_strideq]
punpckldq m4, m5
movd m5, [srcq + second_offset]
punpckldq m0, m5
movlhps m0, m0
movlhps m1, m2
movlhps m3, m4
-%if %6 == 1
+%if %2 == 1
AVG_4x2x4 m1, m3
%endif
psadbw m1, m0
@@ -137,28 +118,23 @@
paddd m6, m1
paddd m7, m3
%endif
-%if %9 > 0
- ADVANCE_END_OF_LINE %9
-%endif
%endmacro
-; PROCESS_8x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first,second}_extraline, line_ending
-%macro PROCESS_8x2x4 9
- HANDLE_FIRST_OFFSET %7, %2
- movh m0, [srcq + first_offset]
- HANDLE_SECOND_OFFSET %7, %8, %4
+; PROCESS_8x2x4 first, do_avg
+%macro PROCESS_8x2x4 2
+ movh m0, [srcq]
+ HANDLE_SECOND_OFFSET
%if %1 == 1
- movh m4, [ref1q+%3]
- movh m5, [ref2q+%3]
- movh m6, [ref3q+%3]
- movh m7, [ref4q+%3]
+ movh m4, [ref1q]
+ movh m5, [ref2q]
+ movh m6, [ref3q]
+ movh m7, [ref4q]
movhps m0, [srcq + second_offset]
- movhps m4, [ref1q+%5]
- movhps m5, [ref2q+%5]
- movhps m6, [ref3q+%5]
- movhps m7, [ref4q+%5]
-%if %6 == 1
+ movhps m4, [ref1q+ref_strideq]
+ movhps m5, [ref2q+ref_strideq]
+ movhps m6, [ref3q+ref_strideq]
+ movhps m7, [ref4q+ref_strideq]
+%if %2 == 1
movu m3, [second_predq]
pavgb m4, m3
pavgb m5, m3
@@ -171,12 +147,12 @@
psadbw m6, m0
psadbw m7, m0
%else
- movh m1, [ref1q+%3]
- movh m2, [ref2q+%3]
+ movh m1, [ref1q]
+ movh m2, [ref2q]
movhps m0, [srcq + second_offset]
- movhps m1, [ref1q+%5]
- movhps m2, [ref2q+%5]
-%if %6 == 1
+ movhps m1, [ref1q+ref_strideq]
+ movhps m2, [ref2q+ref_strideq]
+%if %2 == 1
movu m3, [second_predq]
pavgb m1, m3
pavgb m2, m3
@@ -186,11 +162,11 @@
paddd m4, m1
paddd m5, m2
- movh m1, [ref3q+%3]
- movhps m1, [ref3q+%5]
- movh m2, [ref4q+%3]
- movhps m2, [ref4q+%5]
-%if %6 == 1
+ movh m1, [ref3q]
+ movhps m1, [ref3q+ref_strideq]
+ movh m2, [ref4q]
+ movhps m2, [ref4q+ref_strideq]
+%if %2 == 1
pavgb m1, m3
pavgb m2, m3
lea second_predq, [second_predq+mmsize]
@@ -200,24 +176,16 @@
paddd m6, m1
paddd m7, m2
%endif
-%if %9 > 0
- ADVANCE_END_OF_LINE %9
-%endif
%endmacro
-; PROCESS_16x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first,second}_extraline, line_ending
-%macro PROCESS_16x2x4 9
- ; 1st 16 px
- HANDLE_FIRST_OFFSET %7, %2
- mova m0, [srcq + first_offset]
- HANDLE_SECOND_OFFSET %7, %8, %4
+; PROCESS_FIRST_MMSIZE do_avg
+%macro PROCESS_FIRST_MMSIZE 1
+ mova m0, [srcq]
+ movu m4, [ref1q]
+ movu m5, [ref2q]
+ movu m6, [ref3q]
+ movu m7, [ref4q]
%if %1 == 1
- movu m4, [ref1q+%3]
- movu m5, [ref2q+%3]
- movu m6, [ref3q+%3]
- movu m7, [ref4q+%3]
-%if %6 == 1
movu m3, [second_predq]
pavgb m4, m3
pavgb m5, m3
@@ -229,10 +197,14 @@
psadbw m5, m0
psadbw m6, m0
psadbw m7, m0
-%else ; %1 == 1
- movu m1, [ref1q+%3]
- movu m2, [ref2q+%3]
-%if %6 == 1
+%endmacro
+
+; PROCESS_16x1x4 offset, do_avg
+%macro PROCESS_16x1x4 2
+ mova m0, [srcq + %1]
+ movu m1, [ref1q + ref_offsetq + %1]
+ movu m2, [ref2q + ref_offsetq + %1]
+%if %2 == 1
movu m3, [second_predq]
pavgb m1, m3
pavgb m2, m3
@@ -242,9 +214,9 @@
paddd m4, m1
paddd m5, m2
- movu m1, [ref3q+%3]
- movu m2, [ref4q+%3]
-%if %6 == 1
+ movu m1, [ref3q + ref_offsetq + %1]
+ movu m2, [ref4q + ref_offsetq + %1]
+%if %2 == 1
pavgb m1, m3
pavgb m2, m3
lea second_predq, [second_predq+mmsize]
@@ -253,60 +225,6 @@
psadbw m2, m0
paddd m6, m1
paddd m7, m2
-%endif ; %1 == 1
-
- ; 2nd 16 px
- mova m0, [srcq + second_offset]
- movu m1, [ref1q+%5]
- movu m2, [ref2q+%5]
-
-%if %6 == 1
- movu m3, [second_predq]
- pavgb m1, m3
- pavgb m2, m3
-%endif
- psadbw m1, m0
- psadbw m2, m0
- paddd m4, m1
- paddd m5, m2
-
- movu m1, [ref3q+%5]
- movu m2, [ref4q+%5]
-
-%if %9 > 0
- ADVANCE_END_OF_LINE %9
-%endif
-
-%if %6 == 1
- pavgb m1, m3
- pavgb m2, m3
- lea second_predq, [second_predq+mmsize]
-%endif
- psadbw m1, m0
- psadbw m2, m0
- paddd m6, m1
- paddd m7, m2
-%endmacro
-
-; PROCESS_32x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first,second}_extraline, line_ending
-%macro PROCESS_32x2x4 9
- PROCESS_16x2x4 %1, %2, %3, %2 + 16, %3 + 16, %6, %7, %7, %8 - %7
- PROCESS_16x2x4 0, %4, %5, %4 + 16, %5 + 16, %6, %8, %8, %9
-%endmacro
-
-; PROCESS_64x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first,second}_extraline, line_ending
-%macro PROCESS_64x2x4 9
- PROCESS_32x2x4 %1, %2, %3, %2 + 32, %3 + 32, %6, %7, %7, %8 - %7
- PROCESS_32x2x4 0, %4, %5, %4 + 32, %5 + 32, %6, %8, %8, %9
-%endmacro
-
-; PROCESS_128x2x4 first, off_{first,second}_{src,ref}, do_avg,
-; {first,second}_extraline, line_ending
-%macro PROCESS_128x2x4 9
- PROCESS_64x2x4 %1, %2, %3, %2 + 64, %3 + 64, %6, %7, %7, %8 - %7
- PROCESS_64x2x4 0, %4, %5, %4 + 64, %5 + 64, %6, %8, %8, %9
%endmacro
; void aom_sadNxNx4d_sse2(uint8_t *src, int src_stride,
@@ -318,38 +236,118 @@
; 3: If 0, then normal sad, else avg
; 4: If 0, then normal sad, else skip rows
%macro SADNXN4D 2-4 0,0
+
+%define spill_src_stride 0
+%define spill_ref_stride 0
+%define spill_cnt 0
+
+; Whether a shared offset should be used instead of adding strides to
+; each reference array. With this option, only one line will be processed
+; per loop iteration.
+%define use_ref_offset (%1 >= mmsize)
+
+; Remove loops in the 4x4 and 8x4 cases
+%define use_loop (use_ref_offset || %2 > 4)
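+; (With SSE2, mmsize == 16: 4x4 and 8x4 are the fully unrolled cases,
+; and every width of 16 or more takes the shared-ref-offset path.)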
+
%if %4 == 1 ; skip rows
%if ARCH_X86_64
-cglobal sad_skip_%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
- res, ref2, ref3, ref4
+%if use_ref_offset
+cglobal sad_skip_%1x%2x4d, 5, 10, 8, src, src_stride, ref1, ref_stride, res, \
+ ref2, ref3, ref4, cnt, ref_offset
+%elif use_loop
+cglobal sad_skip_%1x%2x4d, 5, 9, 8, src, src_stride, ref1, ref_stride, res, \
+ ref2, ref3, ref4, cnt
%else
-cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
- ref2, ref3, ref4
+cglobal sad_skip_%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, res, \
+ ref2, ref3, ref4
+%endif
+%else
+%if use_ref_offset
+cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, ref_offset, ref1, cnt, ref2, ref3, \
+ ref4
+%define spill_src_stride 1
+%define spill_ref_stride 1
+%elif use_loop
+cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, cnt, ref1, ref_stride, ref2, \
+ ref3, ref4
+%define spill_src_stride 1
+%else
+cglobal sad_skip_%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, ref2, \
+ ref3, ref4
+%endif
%endif
%elif %3 == 0 ; normal sad
%if ARCH_X86_64
-cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
- res, ref2, ref3, ref4
+%if use_ref_offset
+cglobal sad%1x%2x4d, 5, 10, 8, src, src_stride, ref1, ref_stride, res, ref2, \
+ ref3, ref4, cnt, ref_offset
+%elif use_loop
+cglobal sad%1x%2x4d, 5, 9, 8, src, src_stride, ref1, ref_stride, res, ref2, \
+ ref3, ref4, cnt
%else
-cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
- ref2, ref3, ref4
+cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, res, ref2, \
+ ref3, ref4
+%endif
+%else
+%if use_ref_offset
+cglobal sad%1x%2x4d, 4, 7, 8, src, ref_offset, ref1, cnt, ref2, ref3, ref4
+ %define spill_src_stride 1
+ %define spill_ref_stride 1
+%elif use_loop
+cglobal sad%1x%2x4d, 4, 7, 8, src, cnt, ref1, ref_stride, ref2, ref3, ref4
+ %define spill_src_stride 1
+%else
+cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, ref2, ref3, \
+ ref4
+%endif
%endif
%else ; avg
%if ARCH_X86_64
+%if use_ref_offset
+cglobal sad%1x%2x4d_avg, 6, 11, 8, src, src_stride, ref1, ref_stride, \
+ second_pred, res, ref2, ref3, ref4, cnt, \
+ ref_offset
+%elif use_loop
cglobal sad%1x%2x4d_avg, 6, 10, 8, src, src_stride, ref1, ref_stride, \
- second_pred, res, ref2, ref3, ref4
+ second_pred, res, ref2, ref3, ref4, cnt
%else
-cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, \
- second_pred, ref2, ref3
- %define src_strideq r1mp
- %define src_strided r1mp
+cglobal sad%1x%2x4d_avg, 6, 9, 8, src, src_stride, ref1, ref_stride, \
+ second_pred, res, ref2, ref3, ref4
+%endif
+%else
+%if use_ref_offset
+cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_offset, \
+ second_pred, ref2, ref3
+ %define spill_src_stride 1
+ %define spill_ref_stride 1
+ %define spill_cnt 1
+%elif use_loop
+cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, \
+ second_pred, ref2, ref3
+ %define spill_src_stride 1
+ %define spill_cnt 1
+%else
+cglobal sad%1x%2x4d_avg, 5, 7, 8, src, ref4, ref1, ref_stride, \
+ second_pred, ref2, ref3
+ %define spill_src_stride 1
+%endif
%endif
%endif
- %define mflag ((1 - ARCH_X86_64) & %3)
+%if spill_src_stride
+ %define src_strideq r1mp
+ %define src_strided r1mp
+%endif
+%if spill_ref_stride
+ %define ref_strideq r3mp
+ %define ref_strided r3mp
+%endif
+
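+; cnt is spilled only in the 32-bit avg paths above, where all seven
+; available registers are already assigned.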
+%if spill_cnt
+ SUB rsp, 4
+ %define cntd word [rsp]
+%endif
+
%if %4 == 1
- lea src_strided, [2*src_strided]
- lea ref_strided, [2*ref_strided]
+ sal src_strided, 1
+ sal ref_strided, 1
%endif
movsxdifnidn src_strideq, src_strided
movsxdifnidn ref_strideq, ref_strided
@@ -359,18 +357,67 @@
mov ref4q, [ref1q+gprsize*3]
mov ref1q, [ref1q+gprsize*0]
- PROCESS_%1x2x4 1, 0, 0, 0, ref_strideq, %3, 0, 1, 2
-%if %4 == 1 ; downsample number of rows by 2
-%define num_rep (%2-8)/4
-%else
-%define num_rep (%2-4)/2
-%endif
-%rep num_rep
- PROCESS_%1x2x4 0, 0, 0, 0, ref_strideq, %3, 0, 1, 2
-%endrep
-%undef num_rep
- PROCESS_%1x2x4 0, 0, 0, 0, ref_strideq, %3, 0, 1, 2
+; Is the loop for this wxh in another function?
+; If so, we jump into that function for both the loop and the return
+%define external_loop (use_ref_offset && %1 > mmsize && %1 != %2)
+%if use_ref_offset
+ PROCESS_FIRST_MMSIZE %3
+%if %1 > mmsize
+ mov ref_offsetq, 0
+ mov cntd, %2 >> %4
+; Jump part way into the loop for the square version of this width
+%if %3 == 1
+ jmp mangle(private_prefix %+ _sad%1x%1x4d_avg %+ SUFFIX).midloop
+%elif %4 == 1
+ jmp mangle(private_prefix %+ _sad_skip_%1x%1x4d %+ SUFFIX).midloop
+%else
+ jmp mangle(private_prefix %+ _sad%1x%1x4d %+ SUFFIX).midloop
+%endif
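+; A wide, non-square kernel (e.g. 32x16) thus reuses the loop body and
+; return of the square kernel of the same width instead of carrying
+; its own copy.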
+%else
+ mov ref_offsetq, ref_strideq
+ add srcq, src_strideq
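+; PROCESS_FIRST_MMSIZE above already consumed the first row, hence
+; the '- 1' below.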
+ mov cntd, (%2 >> %4) - 1
+%endif
+%if external_loop == 0
+.loop:
+; Unrolled horizontal loop
+%assign h_offset 0
+%rep %1/mmsize
+ PROCESS_16x1x4 h_offset, %3
+%if h_offset == 0
+; The first mmsize of the first row was done by PROCESS_FIRST_MMSIZE;
+; the .midloop jumps land here to finish that row
+.midloop:
+%endif
+%assign h_offset h_offset+mmsize
+%endrep
+
+ add srcq, src_strideq
+ add ref_offsetq, ref_strideq
+ sub cntd, 1
+ jnz .loop
+%endif
+%else
+ PROCESS_%1x2x4 1, %3
+ ADVANCE_END_OF_TWO_LINES
+%if use_loop
+ mov cntd, (%2/2 >> %4) - 1
+.loop:
+%endif
+ PROCESS_%1x2x4 0, %3
+%if use_loop
+ ADVANCE_END_OF_TWO_LINES
+ sub cntd, 1
+ jnz .loop
+%endif
+%endif
+
+%if spill_cnt
+; Undo stack allocation for cnt
+ ADD rsp, 4
+%endif
+
+%if external_loop == 0
%if %3 == 0
%define resultq r4
%define resultmp r4mp
@@ -379,6 +426,16 @@
%define resultmp r5mp
%endif
+; Undo modifications on parameters on the stack
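+; (A spilled stride aliases its argument on the caller's stack, so the
+; doubling applied for skip mode must be reversed before returning.)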
+%if %4 == 1
+%if spill_src_stride
+ shr src_strided, 1
+%endif
+%if spill_ref_stride
+ shr ref_strided, 1
+%endif
+%endif
+
%if %1 > 4
pslldq m5, 4
pslldq m7, 4
@@ -407,6 +464,7 @@
movq [resultq+8], m7
RET
%endif
+%endif ; external_loop == 0
%endmacro
INIT_XMM sse2