Lines Matching refs:T7

571 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
580 vpxor \T7, \T7, \T7
584 vmovdqu (%r10), \T7
585 vpshufb SHUF_MASK(%rip), \T7, \T7
586 vpxor \T7, \T8, \T8
593 vmovdqu \T8, \T7
597 vpxor \T7, \T7, \T7
609 vpsrldq $8, \T7, \T7
610 vpxor \T1, \T7, \T7
620 vpsrldq $4, \T7, \T7
621 vpxor \T1, \T7, \T7
630 vpshufb (%r11), \T7, \T7
631 vpand \T1, \T7, \T7
633 vpshufb SHUF_MASK(%rip), \T7, \T7
634 vpxor \T8, \T7, \T7
635 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
638 vmovdqu \T7, AadHash(arg2)
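The CALC_AAD_HASH matches above show T7 collecting the byte-swapped AAD blocks, being folded through GHASH_MUL, and the running hash being written back to AadHash(arg2). For reference, the operation GHASH_MUL carries out with carry-less multiplies is the GF(2^128) multiplication from NIST SP 800-38D; a minimal bit-by-bit C sketch follows (names are illustrative, not taken from the kernel source):

#include <stdint.h>

/* Illustrative 128-bit block; "hi" holds bits 127..64 of the big-endian block. */
struct be128 { uint64_t hi, lo; };

/*
 * Reference GF(2^128) multiply as specified for GHASH (NIST SP 800-38D):
 * Z = 0, V = y; for each bit of x (MSB first): if the bit is set, Z ^= V;
 * then V is shifted right by one, folding the reduction constant
 * R = 0xE1 || 0^120 back in when a bit falls off the low end.
 */
static struct be128 gf128_mul(struct be128 x, struct be128 y)
{
	struct be128 z = { 0, 0 };
	struct be128 v = y;
	int i;

	for (i = 0; i < 128; i++) {
		uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
					: (x.lo >> (127 - i)) & 1;
		uint64_t carry = v.lo & 1;

		if (bit) {
			z.hi ^= v.hi;
			z.lo ^= v.lo;
		}
		v.lo = (v.lo >> 1) | (v.hi << 63);
		v.hi >>= 1;
		if (carry)
			v.hi ^= 0xE100000000000000ULL;
	}
	return z;
}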
1204 …_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
1290 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
1313 vpxor \T3, \T7, \T7
1338 vpxor \T3, \T7, \T7
1361 vpxor \T3, \T7, \T7
1385 vpxor \T3, \T7, \T7
1408 vpxor \T3, \T7, \T7
1432 vpxor \T3, \T7, \T7
1457 vpxor \T3, \T7, \T7
1466 vpxor \T7, \T6, \T6
1509 vpxor \T3, \T7, \T7
1510 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7
1517 vpslld $31, \T7, \T2 # packed left shifting << 31
1518 vpslld $30, \T7, \T3 # packed left shifting << 30
1519 vpslld $25, \T7, \T4 # packed left shifting << 25
1527 vpxor \T2, \T7, \T7 # first phase of the reduction complete
1542 vpsrld $1, \T7, \T2 # packed right shifting >> 1
1543 vpsrld $2, \T7, \T3 # packed right shifting >> 2
1544 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1549 vpxor \T2, \T7, \T7
1550 vpxor \T7, \T6, \T6 # the result is in T6
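The vpslld/vpsrld matches at 1517-1519 and 1542-1544 (and again at 1705-1707 / 1720-1722 below) are the two phases of the shift/xor reduction of the carry-less product <T6:T7> modulo the GHASH polynomial. A scalar sketch of how the three shifted copies are combined per 32-bit lane; the vpslldq/vpsrldq lane moves between the two phases are not among the matched lines and are omitted here:

#include <stdint.h>

/*
 * Per-lane analogue of the packed shifts matched above. The first phase
 * combines left shifts by 31, 30 and 25; the second phase combines right
 * shifts by 1, 2 and 7. Only the shift/XOR combination is illustrated.
 */
static inline uint32_t reduce_phase1_lane(uint32_t lane)
{
	return (lane << 31) ^ (lane << 30) ^ (lane << 25);
}

static inline uint32_t reduce_phase2_lane(uint32_t lane)
{
	return (lane >> 1) ^ (lane >> 2) ^ (lane >> 7);
}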
1571 .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
1580 vpclmulqdq $0x00, \T5, \XMM1, \T7
1594 vpxor \T4, \T7, \T7
1609 vpxor \T4, \T7, \T7
1624 vpxor \T4, \T7, \T7
1639 vpxor \T4, \T7, \T7
1654 vpxor \T4, \T7, \T7
1669 vpxor \T4, \T7, \T7
1684 vpxor \T4, \T7, \T7
1691 vpxor \T7, \XMM1, \T2
1699 vpxor \T4, \T7, \T7
1700 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of
1705 vpslld $31, \T7, \T2 # packed left shifting << 31
1706 vpslld $30, \T7, \T3 # packed left shifting << 30
1707 vpslld $25, \T7, \T4 # packed left shifting << 25
1715 vpxor \T2, \T7, \T7 # first phase of the reduction complete
1720 vpsrld $1, \T7, \T2 # packed right shifting >> 1
1721 vpsrld $2, \T7, \T3 # packed right shifting >> 2
1722 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1727 vpxor \T2, \T7, \T7
1728 vpxor \T7, \T6, \T6 # the result is in T6
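GHASH_LAST_8_AVX folds eight blocks in one pass: XMM1 (which already carries the running hash) through XMM8 are multiplied by descending powers of the hash key and the partial products accumulate in <T6:T7> before the reduction above. In terms of the gf128_mul reference sketched earlier, the aggregation amounts to the following (illustrative helper, not a kernel function):

/*
 * Aggregated 8-block GHASH: acc = X1*H^8 ^ X2*H^7 ^ ... ^ X8*H.
 * Hpow[i] is assumed to hold H^(8-i); gf128_mul is the reference
 * multiply from the earlier sketch.
 */
static struct be128 ghash_8_blocks(const struct be128 Hpow[8],
				   const struct be128 X[8])
{
	struct be128 acc = { 0, 0 };
	int i;

	for (i = 0; i < 8; i++) {
		struct be128 t = gf128_mul(X[i], Hpow[i]);

		acc.hi ^= t.hi;
		acc.lo ^= t.lo;
	}
	return acc;
}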
2159 …PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
2245 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
2266 vpxor \T3, \T7, \T7
2292 vpxor \T3, \T7, \T7
2316 vpxor \T3, \T7, \T7
2341 vpxor \T3, \T7, \T7
2365 vpxor \T3, \T7, \T7
2389 vpxor \T3, \T7, \T7
2414 vpxor \T3, \T7, \T7
2466 vpxor \T3, \T7, \T7
2467 vpxor \T6, \T1, \T1 # accumulate the results in T1:T7
2475 vpclmulqdq $0x01, \T7, \T3, \T2
2478 vpxor \T2, \T7, \T7 # first phase of the reduction complete
2493 vpclmulqdq $0x00, \T7, \T3, \T2
2496 vpclmulqdq $0x10, \T7, \T3, \T4
2521 .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
2533 vpclmulqdq $0x00, \T5, \XMM1, \T7
2549 vpxor \T4, \T7, \T7
2567 vpxor \T4, \T7, \T7
2585 vpxor \T4, \T7, \T7
2603 vpxor \T4, \T7, \T7
2621 vpxor \T4, \T7, \T7
2639 vpxor \T4, \T7, \T7
2657 vpxor \T4, \T7, \T7
2663 vpxor \T7, \XMM1, \T2
2671 vpxor \T4, \T7, \T7
2672 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of the
2679 vpclmulqdq $0x01, \T7, \T3, \T2
2682 vpxor \T2, \T7, \T7 # first phase of the reduction complete
2687 vpclmulqdq $0x00, \T7, \T3, \T2
2690 vpclmulqdq $0x10, \T7, \T3, \T4
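The AVX2 variants (2475-2496 and 2679-2690) perform the same reduction with vpclmulqdq against a precomputed polynomial constant instead of the shift/xor sequence, but consume the same precomputed hash-key powers. A sketch of that precomputation, again in terms of the reference multiply above (hypothetical helper name):

/*
 * Precompute H^1 .. H^8 as consumed by the 8-block GHASH macros,
 * with Hpow[0] = H^8 down to Hpow[7] = H^1, matching the ordering
 * assumed in ghash_8_blocks() above.
 */
static void precompute_hkey_powers(struct be128 H, struct be128 Hpow[8])
{
	int i;

	Hpow[7] = H;					/* H^1 */
	for (i = 6; i >= 0; i--)
		Hpow[i] = gf128_mul(Hpow[i + 1], H);	/* H^(8-i) */
}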