Lines Matching refs:TMP4

195 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
224 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
232 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
239 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
510 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
531 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
536 pslld $25, \TMP4 # packed left shift <<25
538 pxor \TMP4, \TMP2
546 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
550 movdqa \GH,\TMP4
553 psrld $7,\TMP4 # packed right shift >>7
555 pxor \TMP4,\TMP2
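
The GHASH_MUL matches above are the reduction step: GH is copied into three temporaries so the packed shifts can run independently, folding the carry-less product back modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1. For orientation, here is a minimal reference multiply in plain C. It uses the textbook serial bit-by-bit algorithm from the GCM specification rather than the pclmulqdq-plus-shift-reduction technique this assembly implements, so it only models the result; the name gf128_mul is mine.

#include <stdint.h>
#include <string.h>

/*
 * Reference GHASH multiply in GF(2^128): z = x * h with the bit ordering
 * and reduction polynomial (x^128 + x^7 + x^2 + x + 1) from the GCM spec.
 * Serial behavioural model only, not the assembly's technique.
 */
static void gf128_mul(const uint8_t x[16], const uint8_t h[16], uint8_t z[16])
{
        uint8_t v[16];
        int i, j, carry;

        memcpy(v, h, 16);
        memset(z, 0, 16);

        for (i = 0; i < 128; i++) {
                /* if bit i of x (MSB-first) is set, z ^= v */
                if (x[i / 8] & (0x80 >> (i % 8)))
                        for (j = 0; j < 16; j++)
                                z[j] ^= v[j];

                /* v = v * x: shift one bit in GCM's reflected order and
                 * fold the carried-out bit back with 0xE1, which encodes
                 * the polynomial's low terms x^7 + x^2 + x + 1 */
                carry = v[15] & 1;
                for (j = 15; j > 0; j--)
                        v[j] = (v[j] >> 1) | (v[j - 1] << 7);
                v[0] >>= 1;
                if (carry)
                        v[0] ^= 0xE1;
        }
}

The three back-to-back GHASH_MUL invocations in the PRECOMPUTE matches above are consistent with raising the hash key to successive powers for the four-block parallel path; in terms of this sketch that would simply be repeated calls such as gf128_mul(h, h, h2) and gf128_mul(h2, h, h3).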
594 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
608 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
624 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
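
CALC_AAD_HASH runs the additional authenticated data through the same multiply: each 16-byte AAD block is folded into the running hash, which is then multiplied by the hash key. A sketch of that per-block loop, reusing gf128_mul from the sketch above; the function name and the omission of the zero-padded final partial block are my simplifications.

#include <stddef.h>

/* GHASH over the AAD, one full 16-byte block at a time;
 * gf128_mul() as sketched above. */
static void ghash_aad(uint8_t hash[16], const uint8_t h[16],
                      const uint8_t *aad, size_t len)
{
        uint8_t tmp[16];
        size_t i;

        while (len >= 16) {
                for (i = 0; i < 16; i++)
                        hash[i] ^= aad[i];      /* fold block into the hash */
                gf128_mul(hash, h, tmp);        /* multiply by the hash key */
                memcpy(hash, tmp, 16);
                aad += 16;
                len -= 16;
        }
}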
787 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
849 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
851 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
853 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
856 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
858 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
861 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
976 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
987 movdqa \XMM5, \TMP4
992 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1045 pxor \TMP1, \TMP4
1046 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1075 pxor \TMP1, \TMP4
1076 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1134 pxor \TMP4, \TMP1
1149 movdqa \XMM5, \TMP4
1150 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1153 pslld $25, \TMP4 # packed left shift << 25
1155 pxor \TMP4, \TMP2
1163 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1165 movdqa \XMM5,\TMP4
1168 psrld $7, \TMP4 # packed right shift >>7
1170 pxor \TMP4,\TMP2
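
The comments in this group show one pclmulqdq producing a1*b1 in TMP4 and the results being accumulated as the 256-bit value TMP4:XMM5 with a separately held "middle part" — the usual Karatsuba split of a 128x128 carry-less multiply into a1*b1, a0*b0 and (a1+a0)*(b1+b0). Below is a plain-C sketch of that decomposition under my own naming (clmul64, clmul128_karatsuba) and variable-to-register mapping; the final modular reduction is left out.

#include <stdint.h>

/* Carry-less 64x64 -> 128-bit multiply (software stand-in for pclmulqdq). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
        uint64_t h = 0, l = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if ((b >> i) & 1) {
                        l ^= a << i;
                        if (i)
                                h ^= a >> (64 - i);
                }
        }
        *hi = h;
        *lo = l;
}

/*
 * Karatsuba 128x128 carry-less multiply: three 64x64 multiplies instead
 * of four.  a1:a0 and b1:b0 are the high/low 64-bit halves; the unreduced
 * 256-bit product lands in r[3]:r[2]:r[1]:r[0] (r[3] most significant).
 */
static void clmul128_karatsuba(uint64_t a1, uint64_t a0,
                               uint64_t b1, uint64_t b0, uint64_t r[4])
{
        uint64_t hh, hl, lh, ll, mh, ml;

        clmul64(a1, b1, &hh, &hl);              /* a1*b1 */
        clmul64(a0, b0, &lh, &ll);              /* a0*b0 */
        clmul64(a1 ^ a0, b1 ^ b0, &mh, &ml);    /* (a1+a0)*(b1+b0) */

        /* middle part = (a1+a0)*(b1+b0) ^ a1*b1 ^ a0*b0 = a1*b0 ^ a0*b1 */
        mh ^= hh ^ lh;
        ml ^= hl ^ ll;

        r[3] = hh;
        r[2] = hl ^ mh;         /* fold the middle part across the */
        r[1] = lh ^ ml;         /* 64-bit boundary between the two products */
        r[0] = ll;
}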
1184 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1195 movdqa \XMM5, \TMP4
1200 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1253 pxor \TMP1, \TMP4
1254 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1283 pxor \TMP1, \TMP4
1284 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1346 pxor \TMP4, \TMP1
1361 movdqa \XMM5, \TMP4
1362 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1365 pslld $25, \TMP4 # packed left shift << 25
1367 pxor \TMP4, \TMP2
1375 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1377 movdqa \XMM5,\TMP4
1380 psrld $7, \TMP4 # packed right shift >>7
1382 pxor \TMP4,\TMP2
1391 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1402 movdqu HashKey_4_k(%arg2), \TMP4
1403 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1415 movdqu HashKey_3_k(%arg2), \TMP4
1416 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1430 movdqu HashKey_2_k(%arg2), \TMP4
1431 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1443 movdqu HashKey_k(%arg2), \TMP4
1444 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1451 movdqa \TMP2, \TMP4
1452 pslldq $8, \TMP4 # left shift TMP4 2 DWs
1454 pxor \TMP4, \XMMDst
1460 movdqa \XMMDst, \TMP4
1461 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1464 pslld $25, \TMP4 # packed left shift << 25
1466 pxor \TMP4, \TMP2
1476 movdqa \XMMDst, \TMP4
1479 psrld $7, \TMP4 # packed right shift >> 7
1481 pxor \TMP4, \TMP2
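
GHASH_LAST_4 applies the same Karatsuba multiply to the last four blocks, pairing each with a precomputed hash-key operand (the HashKey_4_k .. HashKey_k loads feed the (a1+a0)*(b1+b0) term), XOR-accumulates the unreduced products in XMMDst, and performs the modular reduction once at the end. A sketch of that accumulate-then-reduce-once structure, reusing clmul128_karatsuba from the sketch above; the block-to-key pairing and all names are my reading of the macro, and the sketch recomputes the (b1 ^ b0) halves that the assembly loads precomputed.

/*
 * GHASH of four blocks with one reduction: each block is multiplied by
 * its hash-key power, the unreduced 256-bit products are XOR-accumulated,
 * and only the final sum is reduced modulo x^128 + x^7 + x^2 + x + 1.
 * clmul128_karatsuba() as sketched above.
 */
static void ghash_last_4(const uint64_t x[4][2],        /* blocks, {hi, lo} */
                         const uint64_t hpow[4][2],     /* key powers, {hi, lo} */
                         uint64_t acc[4])               /* unreduced 256-bit sum */
{
        uint64_t r[4];
        int i, j;

        acc[0] = acc[1] = acc[2] = acc[3] = 0;
        for (i = 0; i < 4; i++) {
                clmul128_karatsuba(x[i][0], x[i][1],
                                   hpow[i][0], hpow[i][1], r);
                for (j = 0; j < 4; j++)
                        acc[j] ^= r[j];
        }
        /* the single reduction of acc (the pslld/psrld block above) is not shown */
}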