;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .

%include "vpx_config.asm"

%define program_name vp9


%define UNIX64 0
%define WIN64 0
%if ARCH_X86_64
%ifidn __OUTPUT_FORMAT__,win32
%define WIN64 1
%elifidn __OUTPUT_FORMAT__,win64
%define WIN64 1
%elifidn __OUTPUT_FORMAT__,x64
%define WIN64 1
%else
%define UNIX64 1
%endif
%endif

%ifidn __OUTPUT_FORMAT__,elf32
%define mangle(x) x
%elifidn __OUTPUT_FORMAT__,elf64
%define mangle(x) x
%elifidn __OUTPUT_FORMAT__,elf
%define mangle(x) x
%elifidn __OUTPUT_FORMAT__,x64
%define mangle(x) x
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) x
%else
%define mangle(x) _ %+ x
%endif

; FIXME: All of the 64-bit asm functions that take a stride as an argument
; via register assume that the high dword of that register is filled with 0.
; This is true in practice (since we never do any 64-bit arithmetic on strides,
; and x264's strides are all positive), but is not guaranteed by the ABI.

; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align attribute,
; so use a different read-only section.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,macho64
SECTION .text align=%1
%elifidn __OUTPUT_FORMAT__,macho
SECTION .text align=%1
fakegot:
%elifidn __OUTPUT_FORMAT__,aout
section .text
%else
SECTION .rodata align=%1
%endif
%endmacro

; aout does not support align=
%macro SECTION_TEXT 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .text
%else
SECTION .text align=%1
%endif
%endmacro

; PIC macros are copied from vpx_ports/x86_abi_support.asm. The "%define PIC"
; from the original code is added in for 64-bit.
%ifidn __OUTPUT_FORMAT__,elf32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,macho32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,win32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,aout
%define ABI_IS_32BIT 1
%else
%define ABI_IS_32BIT 0
%endif

%if ABI_IS_32BIT
%if CONFIG_PIC=1
%ifidn __OUTPUT_FORMAT__,elf32
%define GET_GOT_SAVE_ARG 1
%define WRT_PLT wrt ..plt
%macro GET_GOT 1
extern _GLOBAL_OFFSET_TABLE_
push %1
call %%get_got
%%sub_offset:
jmp %%exitGG
%%get_got:
mov %1, [esp]
add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
ret
%%exitGG:
%undef GLOBAL
%define GLOBAL(x) x + %1 wrt ..gotoff
%undef RESTORE_GOT
%define RESTORE_GOT pop %1
%endmacro
%elifidn __OUTPUT_FORMAT__,macho32
%define GET_GOT_SAVE_ARG 1
%macro GET_GOT 1
push %1
call %%get_got
%%get_got:
pop %1
%undef GLOBAL
%define GLOBAL(x) x + %1 - %%get_got
%undef RESTORE_GOT
%define RESTORE_GOT pop %1
%endmacro
%endif
%endif

%if ARCH_X86_64 == 0
%undef PIC
%endif

%else
%macro GET_GOT 1
%endmacro
%define GLOBAL(x) rel x
%define WRT_PLT wrt ..plt

%if WIN64
%define PIC
%elifidn __OUTPUT_FORMAT__,macho64
%define PIC
%elif CONFIG_PIC
%define PIC
%endif
%endif

%ifnmacro GET_GOT
%macro GET_GOT 1
%endmacro
%define GLOBAL(x) x
%endif
%ifndef RESTORE_GOT
%define RESTORE_GOT
%endif
%ifndef WRT_PLT
%define WRT_PLT
%endif

%ifdef PIC
default rel
%endif
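; A usage sketch (the table name below is hypothetical): code that references data
; wraps the address in GLOBAL() and brackets it with GET_GOT/RESTORE_GOT, e.g.
;   GET_GOT     r1
;   movq        m0, [GLOBAL(some_const_table)]
;   RESTORE_GOT
; which is a RIP-relative reference on 64-bit (GET_GOT/RESTORE_GOT expand to nothing)
; and a GOT-based one on 32-bit PIC builds.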
; Done with PIC macros

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
%ifndef __NASM_VER__
CPU amdnop
%else
%use smartalign
ALIGNMODE k7
%endif

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size

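; A complete (hypothetical) leaf function, as a sketch of how the pieces fit together:
;   INIT_XMM sse2
;   cglobal copy_16bytes, 2,2,1, dst, src
;       mova    m0, [srcq]
;       mova    [dstq], m0
;       RET
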
%macro DECLARE_REG 5-6
%define r%1q %2
%define r%1d %3
%define r%1w %4
%define r%1b %5
%if %0 == 5
%define r%1m %3
%define r%1mp %2
%elif ARCH_X86_64 ; memory
%define r%1m [rsp + stack_offset + %6]
%define r%1mp qword r %+ %1m
%else
%define r%1m [esp + stack_offset + %6]
%define r%1mp dword r %+ %1m
%endif
%define r%1 %2
%endmacro

%macro DECLARE_REG_SIZE 2
%define r%1q r%1
%define e%1q r%1
%define r%1d e%1
%define e%1d e%1
%define r%1w %1
%define e%1w %1
%define r%1b %2
%define e%1b %2
%if ARCH_X86_64 == 0
%define r%1 e%1
%endif
%endmacro

DECLARE_REG_SIZE ax, al
DECLARE_REG_SIZE bx, bl
DECLARE_REG_SIZE cx, cl
DECLARE_REG_SIZE dx, dl
DECLARE_REG_SIZE si, sil
DECLARE_REG_SIZE di, dil
DECLARE_REG_SIZE bp, bpl

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
%assign %%i 0
%rep %0
CAT_XDEFINE t, %%i, r%1
%assign %%i %%i+1
%rotate 1
%endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
%rep %0
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1b t%1 %+ b
%rotate 1
%endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
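
; e.g. (a sketch): "DECLARE_REG_TMP 2,0,1" makes t0/t0q alias r2/r2q, t1 alias r0 and
; t2 alias r1, so scratch registers can differ per arch without rewriting the code
; that uses them.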

%if ARCH_X86_64
%define gprsize 8
%else
%define gprsize 4
%endif

%macro PUSH 1
push %1
%assign stack_offset stack_offset+gprsize
%endmacro

%macro POP 1
pop %1
%assign stack_offset stack_offset-gprsize
%endmacro

%macro PUSH_IF_USED 1-*
%rep %0
%if %1 < regs_used
PUSH r%1
%endif
%rotate 1
%endrep
%endmacro

%macro POP_IF_USED 1-*
%rep %0
%if %1 < regs_used
pop r%1
%endif
%rotate 1
%endrep
%endmacro

%macro LOAD_IF_USED 1-*
%rep %0
%if %1 < num_args
mov r%1, r %+ %1 %+ mp
%endif
%rotate 1
%endrep
%endmacro

%macro SUB 2
sub %1, %2
%ifidn %1, rsp
%assign stack_offset stack_offset+(%2)
%endif
%endmacro

%macro ADD 2
add %1, %2
%ifidn %1, rsp
%assign stack_offset stack_offset-(%2)
%endif
%endmacro

%macro movifnidn 2
%ifnidn %1, %2
mov %1, %2
%endif
%endmacro

%macro movsxdifnidn 2
%ifnidn %1, %2
movsxd %1, %2
%endif
%endmacro

%macro ASSERT 1
%if (%1) == 0
%error assert failed
%endif
%endmacro

%macro DEFINE_ARGS 0-*
%ifdef n_arg_names
%assign %%i 0
%rep n_arg_names
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%assign %%i %%i+1
%endrep
%endif

%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%assign %%i 0
%rep %0
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%assign %%i %%i+1
%rotate 1
%endrep
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
%endmacro
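
; DEFINE_ARGS can also be re-invoked mid-function to rename the argument registers for
; a later stage, e.g. (hypothetical names) "DEFINE_ARGS dst, stride, cnt" makes r0/r1/r2
; usable as dstq/strideq/cntq from that point on.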

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0, rcx, ecx, cx, cl
DECLARE_REG 1, rdx, edx, dx, dl
DECLARE_REG 2, R8, R8D, R8W, R8B
DECLARE_REG 3, R9, R9D, R9W, R9B
DECLARE_REG 4, R10, R10D, R10W, R10B, 40
DECLARE_REG 5, R11, R11D, R11W, R11B, 48
DECLARE_REG 6, rax, eax, ax, al, 56
DECLARE_REG 7, rdi, edi, di, dil, 64
DECLARE_REG 8, rsi, esi, si, sil, 72
DECLARE_REG 9, rbx, ebx, bx, bl, 80
DECLARE_REG 10, rbp, ebp, bp, bpl, 88
DECLARE_REG 11, R12, R12D, R12W, R12B, 96
DECLARE_REG 12, R13, R13D, R13W, R13B, 104
DECLARE_REG 13, R14, R14D, R14W, R14B, 112
DECLARE_REG 14, R15, R15D, R15W, R15B, 120

%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize == 8
%assign xmm_regs_used 0
%else
WIN64_SPILL_XMM %3
%endif
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS %4
%endmacro

%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 6
SUB rsp, (xmm_regs_used-6)*16+16
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
%assign %%i %%i-1
movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
%endrep
%endif
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 1
%if xmm_regs_used > 6
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
%assign %%i %%i-1
movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
%endrep
add %1, (xmm_regs_used-6)*16+16
%endif
%endmacro

%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset stack_offset-((xmm_regs_used-6)*16+16)
%assign xmm_regs_used 0
%endmacro

%macro RET 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
ret
%endmacro

%macro REP_RET 0
%if regs_used > 7 || xmm_regs_used > 6
RET
%else
rep ret
%endif
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0, rdi, edi, di, dil
DECLARE_REG 1, rsi, esi, si, sil
DECLARE_REG 2, rdx, edx, dx, dl
DECLARE_REG 3, rcx, ecx, cx, cl
DECLARE_REG 4, R8, R8D, R8W, R8B
DECLARE_REG 5, R9, R9D, R9W, R9B
DECLARE_REG 6, rax, eax, ax, al, 8
DECLARE_REG 7, R10, R10D, R10W, R10B, 16
DECLARE_REG 8, R11, R11D, R11W, R11B, 24
DECLARE_REG 9, rbx, ebx, bx, bl, 32
DECLARE_REG 10, rbp, ebp, bp, bpl, 40
DECLARE_REG 11, R12, R12D, R12W, R12B, 48
DECLARE_REG 12, R13, R13D, R13W, R13B, 56
DECLARE_REG 13, R14, R14D, R14W, R14B, 64
DECLARE_REG 14, R15, R15D, R15W, R15B, 72

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS %4
%endmacro

%macro RET 0
POP_IF_USED 14, 13, 12, 11, 10, 9
ret
%endmacro

%macro REP_RET 0
%if regs_used > 9
RET
%else
rep ret
%endif
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, eax, ax, al, 4
DECLARE_REG 1, ecx, ecx, cx, cl, 8
DECLARE_REG 2, edx, edx, dx, dl, 12
DECLARE_REG 3, ebx, ebx, bx, bl, 16
DECLARE_REG 4, esi, esi, si, null, 20
DECLARE_REG 5, edi, edi, di, null, 24
DECLARE_REG 6, ebp, ebp, bp, null, 28
%define rsp esp

%macro DECLARE_ARG 1-*
%rep %0
%define r%1m [esp + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m
%rotate 1
%endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
%if regs_used > 7
%assign regs_used 7
%endif
ASSERT regs_used >= num_args
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS %4
%endmacro

%macro RET 0
POP_IF_USED 6, 5, 4, 3
ret
%endmacro

%macro REP_RET 0
%if regs_used > 3
RET
%else
rep ret
%endif
%endmacro

%endif ;======================================================================

%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%endif

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
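; e.g. (a sketch with a hypothetical name): after "INIT_XMM sse2", "cglobal foo, 2,2,3, dst, src"
; emits a global symbol named roughly vp9_foo_sse2 (program_name + name + SUFFIX, plus any
; platform underscore prefix from mangle) and then runs PROLOGUE 2,2,3, dst, src.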
%macro cglobal 1-2+ ; name, [PROLOGUE args]
%if %0 == 1
cglobal_internal %1 %+ SUFFIX
%else
cglobal_internal %1 %+ SUFFIX, %2
%endif
%endmacro
%macro cglobal_internal 1-2+
%ifndef cglobaled_%1
%xdefine %1 mangle(program_name %+ _ %+ %1)
%xdefine %1.skip_prologue %1 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %1, 1
%endif
%xdefine current_function %1
%ifdef CHROMIUM
%ifidn __OUTPUT_FORMAT__,elf
global %1:function hidden
%elifidn __OUTPUT_FORMAT__,elf32
global %1:function hidden
%elifidn __OUTPUT_FORMAT__,elf64
global %1:function hidden
%elifidn __OUTPUT_FORMAT__,macho32
global %1:private_extern
%elifidn __OUTPUT_FORMAT__,macho64
global %1:private_extern
%else
global %1
%endif
%else
global %1
%endif
align function_align
%1:
RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
%assign stack_offset 0
%if %0 > 1
PROLOGUE %2
%endif
%endmacro

%macro cextern 1
%xdefine %1 mangle(program_name %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro

%macro const 2+
%xdefine %1 mangle(program_name %+ _ %+ %1)
global %1
%1: %2
%endmacro

; This is needed for ELF; otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%elifidn __OUTPUT_FORMAT__,elf32
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%elifidn __OUTPUT_FORMAT__,elf64
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif

; cpuflags

%assign cpuflags_mmx (1<<0)
%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow
%assign cpuflags_sse (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2 (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
%assign cpuflags_avx (1<<11)| cpuflags_sse42
%assign cpuflags_xop (1<<12)| cpuflags_avx
%assign cpuflags_fma4 (1<<13)| cpuflags_avx

%assign cpuflags_cache32 (1<<16)
%assign cpuflags_cache64 (1<<17)
%assign cpuflags_slowctz (1<<18)
%assign cpuflags_lzcnt (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom (1<<22)

%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
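
; e.g. a function body can branch at assembly time on the target cpu (a sketch):
;   %if cpuflag(ssse3)
;       pshufb  m0, m2
;   %else
;       ; ...fallback shuffle sequence...
;   %endif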

; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-2
%if %0 >= 1
%xdefine cpuname %1
%assign cpuflags cpuflags_%1
%if %0 >= 2
%xdefine cpuname %1_%2
%assign cpuflags cpuflags | cpuflags_%2
%endif
%xdefine SUFFIX _ %+ cpuname
%if cpuflag(avx)
%assign avx_enabled 1
%endif
%if mmsize == 16 && notcpuflag(sse2)
%define mova movaps
%define movu movups
%define movnta movntps
%endif
%if cpuflag(aligned)
%define movu mova
%elifidn %1, sse3
%define movu lddqu
%endif
%else
%xdefine SUFFIX
%undef cpuname
%undef cpuflags
%endif
%endmacro

; merge mmx and sse*

%macro CAT_XDEFINE 3
%xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
%undef %1%2
%endmacro

%macro INIT_MMX 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define mmsize 8
%define num_mmregs 8
%define mova movq
%define movu movq
%define movh movd
%define movnta movntq
%assign %%i 0
%rep 8
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nmm, %%i, %%i
%assign %%i %%i+1
%endrep
%rep 8
CAT_UNDEF m, %%i
CAT_UNDEF nmm, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro

%macro INIT_XMM 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define mmsize 16
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
%define movh movq
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nxmm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
INIT_XMM
%assign avx_enabled 1
%define PALIGNR PALIGNR_SSSE3
%define RESET_MM_PERMUTATION INIT_AVX
%endmacro

%macro INIT_YMM 0-1+
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova vmovaps
%define movu vmovups
%undef movh
%define movnta vmovntps
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nymm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro

INIT_XMM
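
; Typical use (a sketch): each group of functions is preceded by e.g. "INIT_XMM ssse3" or
; "INIT_MMX mmx2", which sets mmsize, mova/movu/movh and the m# register names, and makes
; cglobal append the matching _ssse3/_mmx2 suffix to the function name.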

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
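;
; e.g. (a sketch): after "SWAP 0, 1", code that names m0 assembles against the register
; previously called m1 and vice versa; no move instructions are emitted.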

%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
%xdefine tmp%2 m%2
%xdefine ntmp%2 nm%2
%rotate 2
%endrep
%rep %0/2
%xdefine m%1 tmp%2
%xdefine nm%1 ntmp%2
%undef tmp%2
%undef ntmp%2
%rotate 2
%endrep
%endmacro

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
%rep %0-1
%ifdef m%1
%xdefine tmp m%1
%xdefine m%1 m%2
%xdefine m%2 tmp
CAT_XDEFINE n, m%1, %1
CAT_XDEFINE n, m%2, %2
%else
; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
; Be careful using this mode in nested macros though, as in some cases there may be
; other copies of m# that have already been dereferenced and don't get updated correctly.
%xdefine %%n1 n %+ %1
%xdefine %%n2 n %+ %2
%xdefine tmp m %+ %%n1
CAT_XDEFINE m, %%n1, m %+ %%n2
CAT_XDEFINE m, %%n2, tmp
CAT_XDEFINE n, m %+ %%n1, %%n1
CAT_XDEFINE n, m %+ %%n2, %%n2
%endif
%undef tmp
%rotate 1
%endrep
%endmacro

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
%if %0
%xdefine %%f %1_m
%else
%xdefine %%f current_function %+ _m
%endif
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
%assign %%i %%i+1
%endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1 ; name to load from
%ifdef %1_m0
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE n, m %+ %%i, %%i
%assign %%i %%i+1
%endrep
%endif
%endmacro

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
call_internal %1, %1 %+ SUFFIX
%endmacro
%macro call_internal 2
%xdefine %%i %1
%ifndef cglobaled_%1
%ifdef cglobaled_%2
%xdefine %%i %2
%endif
%endif
call %%i
LOAD_MM_PERMUTATION %%i
%endmacro
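
; e.g. (a sketch): if a helper ends with SAVE_MM_PERMUTATION, invoking it through the call
; macro above reloads that permutation in the caller, so the helper's output registers keep
; their m# names across the call.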

; Substitutions that reduce instruction size but are functionally equivalent
%macro add 2
%ifnum %2
%if %2==128
sub %1, -128
%else
add %1, %2
%endif
%else
add %1, %2
%endif
%endmacro

%macro sub 2
%ifnum %2
%if %2==128
add %1, -128
%else
sub %1, %2
%endif
%else
sub %1, %2
%endif
%endmacro
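
; e.g. "add r0, 128" needs a 4-byte immediate, while the equivalent "sub r0, -128" fits in
; a sign-extended byte, so the wrappers above silently pick the shorter encoding.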

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 16
%if i < 8
CAT_XDEFINE sizeofmm, i, 8
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
;%5+: operands
%macro RUN_AVX_INSTR 6-7+
%ifid %5
%define %%size sizeof%5
%else
%define %%size mmsize
%endif
%if %%size==32
%if %0 >= 7
v%1 %5, %6, %7
%else
v%1 %5, %6
%endif
%else
%if %%size==8
%define %%regmov movq
%elif %2
%define %%regmov movaps
%else
%define %%regmov movdqa
%endif

%if %4>=3+%3
%ifnidn %5, %6
%if avx_enabled && sizeof%5==16
v%1 %5, %6, %7
%else
%%regmov %5, %6
%1 %5, %7
%endif
%else
%1 %5, %7
%endif
%elif %3
%1 %5, %6, %7
%else
%1 %5, %6
%endif
%endif
%endmacro

; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
%assign %%swap 0
%if avx_enabled
%ifnid %6
%assign %%swap 1
%endif
%elifnidn %5, %6
%ifnid %7
%assign %%swap 1
%endif
%endif
%if %%swap && %3 == 0 && %8 == 1
RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
%else
RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
%endif
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
%macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
%ifidn %3, fnord
RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
%elifidn %4, fnord
RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
%elifidn %5, fnord
RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
%else
RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
%endif
%endmacro
%endmacro
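
; A sketch of the effect: once declared below, "paddw m0, m1, m2" assembles to
; "vpaddw xmm0, xmm1, xmm2" when avx_enabled, and to "movdqa xmm0, xmm1" followed by
; "paddw xmm0, xmm2" otherwise.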

AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 0, 0
AVX_INSTR cmpps, 1, 0, 0
AVX_INSTR cmpsd, 1, 0, 0
AVX_INSTR cmpss, 1, 0, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
%if j < 10
CAT_XDEFINE q000, j, i
%elif j < 100
CAT_XDEFINE q00, j, i
%elif j < 1000
CAT_XDEFINE q0, j, i
%else
CAT_XDEFINE q, j, i
%endif
%assign i i+1
%endrep
%undef i
%undef j
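
; e.g. q3120 expands to 0xD8 (2-bit fields 3,1,2,0 from high bits to low), for use as a
; shuffle immediate: "pshufd m0, m1, q3120".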

%macro FMA_INSTR 3
%macro %1 4-7 %1, %2, %3
%if cpuflag(xop)
v%5 %1, %2, %3, %4
%else
%6 %1, %2, %3
%7 %1, %4
%endif
%endmacro
%endmacro

FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
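
; e.g. "pmacsdd m0, m1, m2, m3" emits the single XOP instruction when cpuflag(xop), and
; otherwise falls back to "pmulld m0, m1, m2" followed by "paddd m0, m3".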