Lines Matching +full:- +full:8 +full:g

2 # Implement fast SHA-512 with AVX2 instructions. (x86_64)
22 # - Redistributions of source code must retain the above
26 # - Redistributions in binary form must reproduce the above
42 # This code is described in an Intel White-Paper:
43 # "Fast SHA-512 Implementations on Intel Architecture Processors"
91 g = %r10
101 XFER_SIZE = 4*8
102 SRND_SIZE = 1*8
103 INP_SIZE = 1*8
104 INPEND_SIZE = 1*8
105 CTX_SIZE = 1*8
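
The five constants above size the routine's local stack frame: a 4x8-byte transfer area for the current k + w values, plus one 8-byte spill slot each for the round counter, the input pointer, the input end and the context pointer. A plain-C mirror of that layout, only as an illustration (the struct and field names below are invented; the actual frame_* offsets are defined later in the file):

    #include <stdint.h>

    /* Illustrative mirror of the stack frame sized by the constants above. */
    struct sha512_avx2_frame {
        uint64_t xfer[4];   /* XFER_SIZE   = 4*8 */
        uint64_t srnd;      /* SRND_SIZE   = 1*8 */
        uint64_t inp;       /* INP_SIZE    = 1*8 */
        uint64_t inpend;    /* INPEND_SIZE = 1*8 */
        uint64_t ctx;       /* CTX_SIZE    = 1*8 */
    };
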
118 # Add reg to mem using reg-mem add and store
146 h = g
147 g = f
157 # YDST = {YSRC1, YSRC2} >> RVAL*8
160 vpalignr $\RVAL, \YSRC2, \YDST, \YDST # YDST = {YSRC1, YSRC2} >> RVAL*8
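
Two of the helpers matched here are worth spelling out. addm adds a register into a memory operand and writes the sum back, and the h = g / g = f lines are assembler symbol reassignments from the RotateState macro, so the working variables "rotate" at assembly time with no runtime moves. A rough C picture, with the function names below invented for illustration:

    #include <stdint.h>

    /* addm p1, p2: reg += mem, then mem = reg (accumulate into memory). */
    static inline void addm(uint64_t *mem, uint64_t reg)
    {
        reg += *mem;
        *mem = reg;
    }

    /* RotateState: in C this needs a real rotation of values; in the
     * assembly it is pure symbol renaming and costs nothing at run time. */
    static inline void rotate_state(uint64_t v[8])  /* v = {a,b,c,d,e,f,g,h} */
    {
        uint64_t old_h = v[7];
        for (int i = 7; i > 0; i--)
            v[i] = v[i - 1];
        v[0] = old_h;
    }
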
166 # Extract w[t-7]
167 MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
168 # Calculate w[t-16] + w[t-7]
169 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
170 # Extract w[t-15]
171 MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
175 # Calculate w[t-15] ror 1
177 vpsllq $(64-1), YTMP1, YTMP3
178 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
179 # Calculate w[t-15] shr 7
180 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
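
AVX2 has no 64-bit vector rotate, so each "ror" in these comments is built from a right shift, a left shift by 64-n and an OR (the vpsrlq / vpsllq / vpor triplet). The scalar identity being used, as a sketch:

    #include <stdint.h>

    /* Rotate right by n bits, 0 < n < 64: what the shift/shift/OR
     * sequence above computes in every 64-bit lane. */
    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }
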
185 add frame_XFER(%rsp), h # h = k + w + h # --
191 xor g, y2 # y2 = f^g # CH
194 and e, y2 # y2 = (f^g)&e # CH
197 add h, d # d = k + w + h + d # --
203 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
208 add y0, y2 # y2 = S1 + CH # --
210 add y1, h # h = k + w + h + S0 # --
212 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
214 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
215 add y3, h # h = t1 + S0 + MAJ # --
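
The running comments on these round lines (S0, S1, CH, MAJ, t1) follow the standard SHA-512 round. A scalar sketch that keeps the same accumulation order as the comments (k + w folded into h first, then d += t1, then h collects S0, S1, CH and MAJ); K[t] and w[t] are assumed to be the usual round constant and schedule word:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* One round over working variables v = {a,b,c,d,e,f,g,h}. */
    static void sha512_round(uint64_t v[8], uint64_t k, uint64_t w)
    {
        uint64_t a=v[0], b=v[1], c=v[2], d=v[3];
        uint64_t e=v[4], f=v[5], g=v[6], h=v[7];

        uint64_t S1  = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
        uint64_t CH  = ((f ^ g) & e) ^ g;          /* Ch(e,f,g)  */
        uint64_t S0  = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
        uint64_t MAJ = ((a | c) & b) | (a & c);    /* Maj(a,b,c) */

        h += k + w;                 /* h = k + w + h     */
        d += h;                     /* d = k + w + h + d */
        d += S1 + CH;               /* d = d + t1        */
        h += S0 + S1 + CH;          /* h = t1 + S0       */
        h += MAJ;                   /* h = t1 + S0 + MAJ */

        /* rotate a..h right for the next round */
        v[0]=h; v[1]=a; v[2]=b; v[3]=c; v[4]=d; v[5]=e; v[6]=f; v[7]=g;
    }
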
221 # Calculate w[t-15] ror 8
222 vpsrlq $8, YTMP1, YTMP2
223 vpsllq $(64-8), YTMP1, YTMP1
224 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
226 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
230 # Add three components, w[t-16], w[t-7] and sigma0
231 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
233 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
235 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
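
Lines 221-235 above complete small sigma0 of W[-15] (ror 1 ^ ror 8 ^ shr 7) and fold it into W[-16] + W[-7]; the vperm2f128/vpand pair then just arranges copies of that sum for the low and high halves of the new block of schedule words. The scalar form of that partial term, as a sketch:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* Small sigma0, the ror 1 / ror 8 / shr 7 combination above. */
    static inline uint64_t s0(uint64_t x)
    {
        return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
    }

    /* Value held at this point: W[-16] + W[-7] + s0(W[-15]). */
    static inline uint64_t w_partial(uint64_t w16, uint64_t w7, uint64_t w15)
    {
        return w16 + w7 + s0(w15);
    }
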
240 vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA}
241 vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA}
247 add 1*8+frame_XFER(%rsp), h # h = k + w + h # --
254 xor g, y2 # y2 = f^g # CH
260 and e, y2 # y2 = (f^g)&e # CH
261 add h, d # d = k + w + h + d # --
267 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
272 add y0, y2 # y2 = S1 + CH # --
275 add y1, h # h = k + w + h + S0 # --
277 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
278 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
279 add y3, h # h = t1 + S0 + MAJ # --
286 vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA}
287 vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA}
288 vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA}
289 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
290 vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA}
291 vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 {BABA}
292 vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA}
293 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
294 # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
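
The ror 19 / ror 61 / shr 6 combination built above from W[-2] is small sigma1. Scalar sketch:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* Small sigma1, as assembled in YTMP4 above. */
    static inline uint64_t s1(uint64_t x)
    {
        return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
    }
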
300 vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--}
304 add 2*8+frame_XFER(%rsp), h # h = k + w + h # --
309 xor g, y2 # y2 = f^g # CH
313 and e, y2 # y2 = (f^g)&e # CH
316 add h, d # d = k + w + h + d # --
321 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
329 add y0, y2 # y2 = S1 + CH # --
332 add y1, h # h = k + w + h + S0 # --
333 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
334 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
336 add y3, h # h = t1 + S0 + MAJ # --
342 vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--}
343 vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--}
344 vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--}
345 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
346 vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--}
347 vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--}
348 vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--}
349 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
350 # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}
352 # Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
354 vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --}
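
Adding sigma1 of W[-2] to the partial sum completes the message-schedule recurrence; the vector code produces four new schedule words per FOUR_ROUNDS_AND_SCHED pass (here the pair written as {W[3], W[2], --, --}). For reference, the whole expansion in scalar form, reusing the helpers sketched above:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }
    static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
    static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /* W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */
    static void sha512_schedule(uint64_t w[80])
    {
        for (int t = 16; t < 80; t++)
            w[t] = s1(w[t - 2]) + w[t - 7] + s0(w[t - 15]) + w[t - 16];
    }
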
362 add 3*8+frame_XFER(%rsp), h # h = k + w + h # --
369 xor g, y2 # y2 = f^g # CH
373 and e, y2 # y2 = (f^g)&e # CH
374 add h, d # d = k + w + h + d # --
378 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
381 add y0, y2 # y2 = S1 + CH # --
384 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
393 add y1, h # h = k + w + h + S0 # --
394 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
395 add y3, h # h = t1 + S0 + MAJ # --
409 xor g, y2 # y2 = f^g # CH
413 and e, y2 # y2 = (f^g)&e # CH
417 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
423 add frame_XFER(%rsp), h # h = k + w + h # --
430 add y0, y2 # y2 = S1 + CH # --
432 add h, d # d = k + w + h + d # --
434 add y1, h # h = k + w + h + S0 # --
436 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
442 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
446 xor g, y2 # y2 = f^g # CH
450 and e, y2 # y2 = (f^g)&e # CH
451 add y3, old_h # h = t1 + S0 + MAJ # --
455 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
461 add 8*1+frame_XFER(%rsp), h # h = k + w + h # --
468 add y0, y2 # y2 = S1 + CH # --
470 add h, d # d = k + w + h + d # --
472 add y1, h # h = k + w + h + S0 # --
474 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
480 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
484 xor g, y2 # y2 = f^g # CH
488 and e, y2 # y2 = (f^g)&e # CH
489 add y3, old_h # h = t1 + S0 + MAJ # --
493 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
499 add 8*2+frame_XFER(%rsp), h # h = k + w + h # --
506 add y0, y2 # y2 = S1 + CH # --
508 add h, d # d = k + w + h + d # --
510 add y1, h # h = k + w + h + S0 # --
512 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
518 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
522 xor g, y2 # y2 = f^g # CH
526 and e, y2 # y2 = (f^g)&e # CH
527 add y3, old_h # h = t1 + S0 + MAJ # --
531 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
537 add 8*3+frame_XFER(%rsp), h # h = k + w + h # --
544 add y0, y2 # y2 = S1 + CH # --
547 add h, d # d = k + w + h + d # --
549 add y1, h # h = k + w + h + S0 # --
551 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
553 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
555 add y3, h # h = t1 + S0 + MAJ # --
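
In these final rounds (the no-schedule DO_4ROUNDS block) the last two additions into h, the S1 + CH term (y2) and MAJ (y3), are deferred into the following round and applied to old_h, which shortens the dependency chain per round. A scalar sketch of the same software pipelining; K[] and w[] are assumed, and the two deferred terms are merged into a single pending value here:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    static void sha512_rounds_pipelined(uint64_t state[8],
                                        const uint64_t K[80],
                                        const uint64_t w[80])
    {
        uint64_t a=state[0], b=state[1], c=state[2], d=state[3];
        uint64_t e=state[4], f=state[5], g=state[6], h=state[7];
        uint64_t pend = 0;                 /* deferred S1 + CH + MAJ */

        for (int t = 0; t < 80; t++) {
            a += pend;                     /* finish the previous round's h,
                                            * the add y2/y3, old_h lines   */

            uint64_t S1  = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
            uint64_t CH  = ((f ^ g) & e) ^ g;
            uint64_t S0  = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
            uint64_t MAJ = ((a | c) & b) | (a & c);

            h += K[t] + w[t];              /* h = k + w + h          */
            d += h + S1 + CH;              /* d = d + t1             */
            h += S0;                       /* S1+CH and MAJ deferred */
            pend = S1 + CH + MAJ;

            uint64_t tmp = h;              /* rotate a..h right      */
            h = g; g = f; f = e; e = d; d = c; c = b; b = a; a = tmp;
        }
        a += pend;                         /* flush the last round   */

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
    }
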
581 and $~(0x20 - 1), %rsp
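
The and with ~(0x20 - 1) rounds %rsp down to a 32-byte boundary so that 32-byte vector stores into the frame (the frame_XFER area) are aligned. The same computation on an address in C, as a sketch:

    #include <stdint.h>

    /* Round an address down to a 32-byte boundary. */
    static inline uintptr_t align_down_32(uintptr_t p)
    {
        return p & ~(uintptr_t)(0x20 - 1);
    }
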
589 mov 8*0(CTX1), a
590 mov 8*1(CTX1), b
591 mov 8*2(CTX1), c
592 mov 8*3(CTX1), d
593 mov 8*4(CTX1), e
594 mov 8*5(CTX1), f
595 mov 8*6(CTX1), g
596 mov 8*7(CTX1), h
656 addm 8*0(CTX2), a
657 addm 8*1(CTX2), b
658 addm 8*2(CTX2), c
659 addm 8*3(CTX2), d
660 addm 8*4(CTX2), e
661 addm 8*5(CTX2), f
662 addm 8*6(CTX2), g
663 addm 8*7(CTX2), h
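
Lines 589-596 load the eight chaining words from the context into a..h, and lines 656-663 add the final working variables back with addm, the usual per-block feed-forward. The same structure in C; sha512_do_rounds() is only an assumed stand-in for the 80 rounds in between:

    #include <stdint.h>

    /* Assumed helper: the 80 rounds for one 128-byte block. */
    void sha512_do_rounds(uint64_t v[8], const uint8_t block[128]);

    static void sha512_one_block(uint64_t state[8], const uint8_t block[128])
    {
        uint64_t v[8];

        for (int i = 0; i < 8; i++)        /* mov 8*i(CTX1), a..h  */
            v[i] = state[i];

        sha512_do_rounds(v, block);

        for (int i = 0; i < 8; i++)        /* addm 8*i(CTX2), a..h */
            state[i] += v[i];
    }
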
691 # Mergeable 640-byte rodata section. This allows the linker to merge the table
692 # with an identical 640-byte fragment from another object's rodata section
741 # Mask for byte-swapping the two qwords in an XMM register using (v)pshufb.
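
SHA-512 message words are defined in big-endian order, so each 8-byte word has to be byte-swapped when loaded on a little-endian machine; that is what this mask plus (v)pshufb accomplish for two qwords at a time. The portable scalar counterpart, as a sketch:

    #include <stdint.h>

    /* Load one big-endian 64-bit message word. */
    static inline uint64_t load_be64(const uint8_t *p)
    {
        uint64_t x = 0;
        for (int i = 0; i < 8; i++)
            x = (x << 8) | p[i];
        return x;
    }
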