########################################################################
# Implement fast SHA-512 with AVX2 instructions. (x86_64)
#
# Copyright (C) 2013 Intel Corporation.
#
# Authors:
#     James Guilford <james.guilford@intel.com>
#     Kirk Yap <kirk.s.yap@intel.com>
#     David Cote <david.m.cote@intel.com>
#     Tim Chen <tim.c.chen@linux.intel.com>
#
# This software is available to you under a choice of one of two
# licenses.  You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
#     Redistribution and use in source and binary forms, with or
#     without modification, are permitted provided that the following
#     conditions are met:
#
#      - Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      - Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials
#        provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################################
#
# This code is described in an Intel White-Paper:
# "Fast SHA-512 Implementations on Intel Architecture Processors"
#
# To find it, surf to http://www.intel.com/p/en_US/embedded
# and search for that title.
#
########################################################################
# This code schedules 1 block at a time, with 4 lanes per block
########################################################################

#ifdef CONFIG_AS_AVX2
#include <linux/linkage.h>

.text

# Virtual Registers
Y_0 = %ymm4
Y_1 = %ymm5
Y_2 = %ymm6
Y_3 = %ymm7

YTMP0 = %ymm0
YTMP1 = %ymm1
YTMP2 = %ymm2
YTMP3 = %ymm3
YTMP4 = %ymm8
XFER  = YTMP0

BYTE_FLIP_MASK = %ymm9

# 1st arg
INP      = %rdi
# 2nd arg
CTX      = %rsi
# 3rd arg
NUM_BLKS = %rdx

c  = %rcx
d  = %r8
e  = %rdx
y3 = %rdi

TBL = %rbp

a = %rax
b = %rbx

f = %r9
g = %r10
h = %r11
old_h = %r11

T1 = %r12
y0 = %r13
y1 = %r14
y2 = %r15

y4 = %r12

# Local variables (stack frame)
XFER_SIZE    = 4*8
SRND_SIZE    = 1*8
INP_SIZE     = 1*8
INPEND_SIZE  = 1*8
RSPSAVE_SIZE = 1*8
GPRSAVE_SIZE = 6*8

frame_XFER    = 0
frame_SRND    = frame_XFER + XFER_SIZE
frame_INP     = frame_SRND + SRND_SIZE
frame_INPEND  = frame_INP + INP_SIZE
frame_RSPSAVE = frame_INPEND + INPEND_SIZE
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
frame_size    = frame_GPRSAVE + GPRSAVE_SIZE

## assume buffers not aligned
#define VMOVDQ vmovdqu

# addm [mem], reg
# Add reg to mem using reg-mem add and store
.macro addm p1 p2
	add	\p1, \p2
	mov	\p2, \p1
.endm


# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load ymm with mem and byte swap each qword
.macro COPY_YMM_AND_BSWAP p1 p2 p3
	VMOVDQ	\p2, \p1
	vpshufb	\p3, \p1, \p1
.endm
# rotate_Ys
# Rotate values of symbols Y_0...Y_3
.macro rotate_Ys
	Y_ = Y_0
	Y_0 = Y_1
	Y_1 = Y_2
	Y_2 = Y_3
	Y_3 = Y_
.endm
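
# The four Y_* symbols form a sliding window over the SHA-512 message
# schedule: each ymm holds four consecutive 64-bit W values, so Y_0..Y_3
# together cover W[t-16]..W[t-1].  Each FOUR_ROUNDS_AND_SCHED below
# overwrites the register holding the oldest four values with the four
# newest, then rotate_Ys renames the symbols so the window advances
# without moving data.  As a hedged C sketch of the idea (illustrative
# only, not kernel code):
#
#	uint64_t *y0, *y1, *y2, *y3, *tmp;	/* each -> 4 qwords of W */
#	/* after the 4 newest W values were written over *y0: */
#	tmp = y0; y0 = y1; y1 = y2; y2 = y3; y3 = tmp;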
# RotateState
.macro RotateState
	# Rotate symbols a..h right
	old_h = h
	TMP_  = h
	h = g
	g = f
	f = e
	e = d
	d = c
	c = b
	b = a
	a = TMP_
.endm

# macro MY_VPALIGNR	YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
	vperm2f128	$0x3, \YSRC2, \YSRC1, \YDST	# YDST = {YS1_LO, YS2_HI}
	vpalignr	$\RVAL, \YSRC2, \YDST, \YDST	# YDST = {YS1, YS2} >> RVAL*8
.endm
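
# AVX2 has no 256-bit-wide byte-align: vpalignr only shifts within each
# 128-bit lane, so MY_VPALIGNR first builds the lane-crossing halves with
# vperm2f128.  The net effect, as a hedged C sketch (my_vpalignr is an
# illustrative helper, not kernel code):
#
#	/* dst = bytes [r, r+32) of the 64-byte value {src1 : src2},
#	 * where src2 holds the less significant 32 bytes */
#	void my_vpalignr(uint8_t dst[32], const uint8_t src1[32],
#			 const uint8_t src2[32], unsigned r)
#	{
#		uint8_t cat[64];
#
#		memcpy(cat, src2, 32);
#		memcpy(cat + 32, src1, 32);
#		memcpy(dst, cat + r, 32);
#	}
#
# With r = 8 and sources {Y_3 : Y_2} this extracts the four qwords
# starting at W[-7], as used below.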
.macro FOUR_ROUNDS_AND_SCHED
################################### RND N + 0 #########################################

	# Extract w[t-7]
	MY_VPALIGNR	YTMP0, Y_3, Y_2, 8		# YTMP0 = W[-7]
	# Calculate w[t-16] + w[t-7]
	vpaddq		Y_0, YTMP0, YTMP0		# YTMP0 = W[-7] + W[-16]
	# Extract w[t-15]
	MY_VPALIGNR	YTMP1, Y_1, Y_0, 8		# YTMP1 = W[-15]

	# Calculate sigma0

	# Calculate w[t-15] ror 1
	vpsrlq		$1, YTMP1, YTMP2
	vpsllq		$(64-1), YTMP1, YTMP3
	vpor		YTMP2, YTMP3, YTMP3		# YTMP3 = W[-15] ror 1
	# Calculate w[t-15] shr 7
	vpsrlq		$7, YTMP1, YTMP4		# YTMP4 = W[-15] >> 7

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	frame_XFER(%rsp),h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA
	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1

	and	e, y2		# y2 = (f^g)&e				# CH
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	add	h, d		# d = k + w + h + d			# --

	and	b, y3		# y3 = (a|c)&b				# MAJA
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB

	add	y0, y2		# y2 = S1 + CH				# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

################################### RND N + 1 #########################################

	# Calculate w[t-15] ror 8
	vpsrlq		$8, YTMP1, YTMP2
	vpsllq		$(64-8), YTMP1, YTMP1
	vpor		YTMP2, YTMP1, YTMP1		# YTMP1 = W[-15] ror 8
	# XOR the three components
	vpxor		YTMP4, YTMP3, YTMP3		# YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
	vpxor		YTMP1, YTMP3, YTMP1		# YTMP1 = s0


	# Add three components, w[t-16], w[t-7] and sigma0
	vpaddq		YTMP1, YTMP0, YTMP0		# YTMP0 = W[-16] + W[-7] + s0
	# Move to appropriate lanes for calculating w[16] and w[17]
	vperm2f128	$0x0, YTMP0, YTMP0, Y_0		# Y_0 = W[-16] + W[-7] + s0 {BABA}
	# Move to appropriate lanes for calculating w[18] and w[19]
	vpand		MASK_YMM_LO(%rip), YTMP0, YTMP0	# YTMP0 = W[-16] + W[-7] + s0 {DC00}

	# Calculate w[16] and w[17] in both 128 bit lanes

	# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
	vperm2f128	$0x11, Y_3, Y_3, YTMP2		# YTMP2 = W[-2] {BABA}
	vpsrlq		$6, YTMP2, YTMP4		# YTMP4 = W[-2] >> 6 {BABA}


	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	1*8+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA


	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH


	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	and	e, y2		# y2 = (f^g)&e				# CH
	add	h, d		# d = k + w + h + d			# --

	and	b, y3		# y3 = (a|c)&b				# MAJA
	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0

	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

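# The vector code in RND N + 0 and N + 1 above computes the SHA-512
# small sigma0 on four schedule words at once.  AVX2 has no 64-bit
# vector rotate, so each rotate is a shift pair plus vpor.  Hedged C
# sketch of what was just computed (illustrative only):
#
#	static inline uint64_t ror64(uint64_t x, int n)
#	{
#		return (x >> n) | (x << (64 - n));
#	}
#
#	static inline uint64_t s0(uint64_t x)	/* sigma0 */
#	{
#		return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
#	}
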
################################### RND N + 2 #########################################

	vpsrlq		$19, YTMP2, YTMP3		# YTMP3 = W[-2] >> 19 {BABA}
	vpsllq		$(64-19), YTMP2, YTMP1		# YTMP1 = W[-2] << 19 {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
	vpsrlq		$61, YTMP2, YTMP3		# YTMP3 = W[-2] >> 61 {BABA}
	vpsllq		$(64-61), YTMP2, YTMP1		# YTMP1 = W[-2] << 61 {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 = (W[-2] ror 19) ^
							#  (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}

	# Add sigma1 to the other components to get w[16] and w[17]
	vpaddq		YTMP4, Y_0, Y_0			# Y_0 = {W[1], W[0], W[1], W[0]}

	# Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
	vpsrlq		$6, Y_0, YTMP4			# YTMP4 = W[-2] >> 6 {DC--}

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	add	2*8+frame_XFER(%rsp), h	# h = k + w + h			# --

	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	or	c, y3		# y3 = a|c				# MAJA
	mov	f, y2		# y2 = f				# CH
	xor	g, y2		# y2 = f^g				# CH

	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	and	e, y2		# y2 = (f^g)&e				# CH

	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	add	h, d		# d = k + w + h + d			# --
	and	b, y3		# y3 = (a|c)&b				# MAJA

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --
	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --

	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

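# w[18] and w[19] need sigma1 of w[16] and w[17], so the four new
# schedule words cannot come from a single vector pass: RND N + 2 above
# finishes w[16]/w[17], and RND N + 3 below derives w[18]/w[19] from
# them.  Hedged C sketch of the dependency (W indexed from the current
# window, illustrative only):
#
#	static inline uint64_t s1(uint64_t x)	/* sigma1 */
#	{
#		return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
#	}
#
#	w[16] = w[0] + s0(w[1]) + w[9]  + s1(w[14]);
#	w[17] = w[1] + s0(w[2]) + w[10] + s1(w[15]);
#	w[18] = w[2] + s0(w[3]) + w[11] + s1(w[16]);	/* needs w[16] */
#	w[19] = w[3] + s0(w[4]) + w[12] + s1(w[17]);	/* needs w[17] */
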
################################### RND N + 3 #########################################

	vpsrlq		$19, Y_0, YTMP3			# YTMP3 = W[-2] >> 19 {DC--}
	vpsllq		$(64-19), Y_0, YTMP1		# YTMP1 = W[-2] << 19 {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
	vpsrlq		$61, Y_0, YTMP3			# YTMP3 = W[-2] >> 61 {DC--}
	vpsllq		$(64-61), Y_0, YTMP1		# YTMP1 = W[-2] << 61 {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 = (W[-2] ror 19) ^
							#  (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}

	# Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
	# to newly calculated sigma1 to get w[18] and w[19]
	vpaddq		YTMP4, YTMP0, YTMP2		# YTMP2 = {W[3], W[2], --, --}

	# Form w[19], w[18], w[17], w[16]
	vpblendd	$0xF0, YTMP2, Y_0, Y_0		# Y_0 = {W[3], W[2], W[1], W[0]}

	mov	a, y3		# y3 = a				# MAJA
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	add	3*8+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA


	mov	f, y2		# y2 = f				# CH
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	xor	g, y2		# y2 = f^g				# CH


	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	h, d		# d = k + w + h + d			# --
	and	b, y3		# y3 = (a|c)&b				# MAJA

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH

	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	add	y0, y2		# y2 = S1 + CH				# --

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	rorx	$28, a, T1	# T1 = (a >> 28)			# S0

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	c, T1		# T1 = a&c				# MAJB
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ

	add	y1, h		# h = k + w + h + S0			# --
	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

	rotate_Ys
.endm

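# Each scalar round interleaved above follows the standard SHA-512
# round function; the S0/S1/CH/MAJ tags in the comments map onto it.
# Hedged C sketch of one round, with the t1/S0 split used by the
# comments (illustrative only):
#
#	S1  = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
#	S0  = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
#	ch  = ((f ^ g) & e) ^ g;		/* == (e&f) ^ (~e&g)   */
#	maj = ((a | c) & b) | (a & c);		/* majority of a, b, c */
#	t1  = h + S1 + ch + K512[t] + w[t];
#	d  += t1;
#	h   = t1 + S0 + maj;
#	/* then RotateState renames a..h for the next round */
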
.macro DO_4ROUNDS

################################### RND N + 0 #########################################

	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 1 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*1+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

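# Note: in DO_4ROUNDS the two final additions into h are deferred.  Each
# round after the first begins with "add y2/y3, old_h", where old_h
# names the register that held h before RotateState renamed it (it is
# the current round's a).  Completing the previous round's h there, off
# the critical path, shortens the serial dependency chain between
# rounds; the macro's last round finishes its own h inline.
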
################################### RND N + 2 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*2+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --

	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 3 #########################################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = t1 + S0# --
	mov	f, y2		# y2 = f				# CH
	rorx	$41, e, y0	# y0 = e >> 41				# S1A
	rorx	$18, e, y1	# y1 = e >> 18				# S1B
	xor	g, y2		# y2 = f^g				# CH

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18)		# S1
	rorx	$14, e, y1	# y1 = (e >> 14)			# S1
	and	e, y2		# y2 = (f^g)&e				# CH
	add	y3, old_h	# h = t1 + S0 + MAJ			# --

	xor	y1, y0		# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1	# T1 = a >> 34				# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g			# CH
	rorx	$39, a, y1	# y1 = a >> 39				# S0A
	mov	a, y3		# y3 = a				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34)		# S0
	rorx	$28, a, T1	# T1 = (a >> 28)			# S0
	add	8*3+frame_XFER(%rsp), h	# h = k + w + h			# --
	or	c, y3		# y3 = a|c				# MAJA

	xor	T1, y1		# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1		# T1 = a				# MAJB
	and	b, y3		# y3 = (a|c)&b				# MAJA
	and	c, T1		# T1 = a&c				# MAJB
	add	y0, y2		# y2 = S1 + CH				# --


	add	h, d		# d = k + w + h + d			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)		# MAJ
	add	y1, h		# h = k + w + h + S0			# --

	add	y2, d		# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h		# h = k + w + h + S0 + S1 + CH = t1 + S0# --

	add	y3, h		# h = t1 + S0 + MAJ			# --

	RotateState

.endm

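# Per 128-byte block, the two macros combine into the 80 SHA-512 rounds.
# Hedged pseudocode sketch of the flow driven by loop1/loop2 below:
#
#	for (t = 0; t < 64; t += 16)	/* loop1: 4 iterations        */
#		4 x FOUR_ROUNDS_AND_SCHED;	/* rounds + scheduling */
#	for (t = 64; t < 80; t += 8)	/* loop2: 2 iterations        */
#		2 x DO_4ROUNDS;			/* rounds only         */
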
########################################################################
# void sha512_transform_rorx(const void* M, void* D, uint64_t L)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
#   message blocks.
# L is the message length in SHA512 blocks
########################################################################
ENTRY(sha512_transform_rorx)
	# Allocate Stack Space
	mov	%rsp, %rax
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp
	mov	%rax, frame_RSPSAVE(%rsp)

	# Save GPRs
	mov	%rbp, frame_GPRSAVE(%rsp)
	mov	%rbx, 8*1+frame_GPRSAVE(%rsp)
	mov	%r12, 8*2+frame_GPRSAVE(%rsp)
	mov	%r13, 8*3+frame_GPRSAVE(%rsp)
	mov	%r14, 8*4+frame_GPRSAVE(%rsp)
	mov	%r15, 8*5+frame_GPRSAVE(%rsp)

	shl	$7, NUM_BLKS	# convert to bytes
	jz	done_hash
	add	INP, NUM_BLKS	# pointer to end of data
	mov	NUM_BLKS, frame_INPEND(%rsp)

	## load initial digest
	mov	8*0(CTX),a
	mov	8*1(CTX),b
	mov	8*2(CTX),c
	mov	8*3(CTX),d
	mov	8*4(CTX),e
	mov	8*5(CTX),f
	mov	8*6(CTX),g
	mov	8*7(CTX),h

	vmovdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:
	lea	K512(%rip), TBL

	## byte swap first 16 qwords
	COPY_YMM_AND_BSWAP	Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_3, 3*32(INP), BYTE_FLIP_MASK

	mov	INP, frame_INP(%rsp)

	## schedule 64 input qwords, by doing 4 iterations of 16 rounds
	movq	$4, frame_SRND(%rsp)

.align 16
loop1:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	1*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	2*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	3*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(4*32), TBL
	FOUR_ROUNDS_AND_SCHED

	subq	$1, frame_SRND(%rsp)
	jne	loop1

	movq	$2, frame_SRND(%rsp)

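# Final 16 rounds: the W values for rounds 64..79 were already produced
# by the last pass of loop1 and sit in Y_0..Y_3, so loop2 only adds the
# round constants and runs the rounds (2 iterations x 8 rounds, no
# further message scheduling).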
loop2:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	DO_4ROUNDS
	vpaddq	1*32(TBL), Y_1, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(2*32), TBL
	DO_4ROUNDS

	vmovdqa	Y_2, Y_0
	vmovdqa	Y_3, Y_1

	subq	$1, frame_SRND(%rsp)
	jne	loop2

	addm	8*0(CTX),a
	addm	8*1(CTX),b
	addm	8*2(CTX),c
	addm	8*3(CTX),d
	addm	8*4(CTX),e
	addm	8*5(CTX),f
	addm	8*6(CTX),g
	addm	8*7(CTX),h

	mov	frame_INP(%rsp), INP
	add	$128, INP
	cmp	frame_INPEND(%rsp), INP
	jne	loop0

done_hash:

	# Restore GPRs
	mov	frame_GPRSAVE(%rsp), %rbp
	mov	8*1+frame_GPRSAVE(%rsp), %rbx
	mov	8*2+frame_GPRSAVE(%rsp), %r12
	mov	8*3+frame_GPRSAVE(%rsp), %r13
	mov	8*4+frame_GPRSAVE(%rsp), %r14
	mov	8*5+frame_GPRSAVE(%rsp), %r15

	# Restore Stack Pointer
	mov	frame_RSPSAVE(%rsp), %rsp
	ret
ENDPROC(sha512_transform_rorx)

########################################################################
### Binary Data

.data

.align 64
# K[t] used in SHA512 hashing
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.align 32

# Mask for byte-swapping a couple of qwords in a YMM register using (v)pshufb.
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617

MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#endif
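
# Hedged usage sketch from C, with the argument order from the comment
# block above ENTRY (variable names are illustrative, not the actual
# kernel glue code):
#
#	u64 state[8];	/* SHA-512 H0..H7, updated in place       */
#	/* data points at nblocks * 128 bytes of padded input     */
#	sha512_transform_rorx(data, state, nblocks);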