Lines Matching full:t4

571 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
587 \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
635 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
863 .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
884 vpslld $25, \GH, \T4 # packed left shifting << 25
887 vpxor \T4, \T2, \T2
898 vpsrld $7,\GH, \T4 # packed right shifting >> 7
900 vpxor \T4, \T2, \T2
909 .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
918 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
924 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
930 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
936 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
942 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
948 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
954 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
969 .macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 X…
1043 … GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks
1204 .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 …
1289 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
1311 vpxor \T3, \T4, \T4
1336 vpxor \T3, \T4, \T4
1359 vpxor \T3, \T4, \T4
1383 vpxor \T3, \T4, \T4
1406 vpxor \T3, \T4, \T4
1430 vpxor \T3, \T4, \T4
1455 vpxor \T3, \T4, \T4
1465 vpxor \T4, \T6, \T6
1510 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7
1519 vpslld $25, \T7, \T4 # packed left shifting << 25
1522 vpxor \T4, \T2, \T2
1544 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1546 vpxor \T4, \T2, \T2
1571 .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
1590 vpclmulqdq $0x11, \T5, \XMM2, \T4
1591 vpxor \T4, \T6, \T6
1593 vpclmulqdq $0x00, \T5, \XMM2, \T4
1594 vpxor \T4, \T7, \T7
1605 vpclmulqdq $0x11, \T5, \XMM3, \T4
1606 vpxor \T4, \T6, \T6
1608 vpclmulqdq $0x00, \T5, \XMM3, \T4
1609 vpxor \T4, \T7, \T7
1620 vpclmulqdq $0x11, \T5, \XMM4, \T4
1621 vpxor \T4, \T6, \T6
1623 vpclmulqdq $0x00, \T5, \XMM4, \T4
1624 vpxor \T4, \T7, \T7
1635 vpclmulqdq $0x11, \T5, \XMM5, \T4
1636 vpxor \T4, \T6, \T6
1638 vpclmulqdq $0x00, \T5, \XMM5, \T4
1639 vpxor \T4, \T7, \T7
1650 vpclmulqdq $0x11, \T5, \XMM6, \T4
1651 vpxor \T4, \T6, \T6
1653 vpclmulqdq $0x00, \T5, \XMM6, \T4
1654 vpxor \T4, \T7, \T7
1665 vpclmulqdq $0x11, \T5, \XMM7, \T4
1666 vpxor \T4, \T6, \T6
1668 vpclmulqdq $0x00, \T5, \XMM7, \T4
1669 vpxor \T4, \T7, \T7
1680 vpclmulqdq $0x11, \T5, \XMM8, \T4
1681 vpxor \T4, \T6, \T6
1683 vpclmulqdq $0x00, \T5, \XMM8, \T4
1684 vpxor \T4, \T7, \T7
1696 vpslldq $8, \T2, \T4
1699 vpxor \T4, \T7, \T7
1707 vpslld $25, \T7, \T4 # packed left shifting << 25
1710 vpxor \T4, \T2, \T2
1722 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1724 vpxor \T4, \T2, \T2
1845 .macro GHASH_MUL_AVX2 GH HK T1 T2 T3 T4 T5
1883 .macro PRECOMPUTE_AVX2 HK T1 T2 T3 T4 T5 T6
1887 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
1890 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
1893 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
1896 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
1899 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
1902 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
1905 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
1917 .macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 …
1993 …GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks bloc…
2159 .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7…
2244 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
2263 vpxor \T3, \T4, \T4
2289 vpxor \T3, \T4, \T4
2313 vpxor \T3, \T4, \T4
2338 vpxor \T3, \T4, \T4
2362 vpxor \T3, \T4, \T4
2386 vpxor \T3, \T4, \T4
2423 vpxor \T3, \T4, \T1
2496 vpclmulqdq $0x10, \T7, \T3, \T4
2497 vpslldq $4, \T4, \T4 # shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
2499 vpxor \T2, \T4, \T4 # second phase of the reduction complete
2501 vpxor \T4, \T1, \T1 # the result is in T1
2521 .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
2545 vpclmulqdq $0x11, \T5, \XMM2, \T4
2546 vpxor \T4, \T6, \T6
2548 vpclmulqdq $0x00, \T5, \XMM2, \T4
2549 vpxor \T4, \T7, \T7
2563 vpclmulqdq $0x11, \T5, \XMM3, \T4
2564 vpxor \T4, \T6, \T6
2566 vpclmulqdq $0x00, \T5, \XMM3, \T4
2567 vpxor \T4, \T7, \T7
2581 vpclmulqdq $0x11, \T5, \XMM4, \T4
2582 vpxor \T4, \T6, \T6
2584 vpclmulqdq $0x00, \T5, \XMM4, \T4
2585 vpxor \T4, \T7, \T7
2599 vpclmulqdq $0x11, \T5, \XMM5, \T4
2600 vpxor \T4, \T6, \T6
2602 vpclmulqdq $0x00, \T5, \XMM5, \T4
2603 vpxor \T4, \T7, \T7
2617 vpclmulqdq $0x11, \T5, \XMM6, \T4
2618 vpxor \T4, \T6, \T6
2620 vpclmulqdq $0x00, \T5, \XMM6, \T4
2621 vpxor \T4, \T7, \T7
2635 vpclmulqdq $0x11, \T5, \XMM7, \T4
2636 vpxor \T4, \T6, \T6
2638 vpclmulqdq $0x00, \T5, \XMM7, \T4
2639 vpxor \T4, \T7, \T7
2653 vpclmulqdq $0x11, \T5, \XMM8, \T4
2654 vpxor \T4, \T6, \T6
2656 vpclmulqdq $0x00, \T5, \XMM8, \T4
2657 vpxor \T4, \T7, \T7
2668 vpslldq $8, \T2, \T4
2671 vpxor \T4, \T7, \T7
2690 vpclmulqdq $0x10, \T7, \T3, \T4
2691 …vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with n…
2693 vpxor \T2, \T4, \T4 # second phase of the reduction complete
2695 vpxor \T4, \T6, \T6 # the result is in T6