1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * SM4 Cipher Algorithm, AES-NI/AVX optimized. 4 * as specified in 5 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html 6 * 7 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi> 8 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi> 9 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> 10 */ 11 12/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at: 13 * https://github.com/mjosaarinen/sm4ni 14 */ 15 16#include <linux/linkage.h> 17#include <asm/frame.h> 18 19#define rRIP (%rip) 20 21#define RX0 %xmm0 22#define RX1 %xmm1 23#define MASK_4BIT %xmm2 24#define RTMP0 %xmm3 25#define RTMP1 %xmm4 26#define RTMP2 %xmm5 27#define RTMP3 %xmm6 28#define RTMP4 %xmm7 29 30#define RA0 %xmm8 31#define RA1 %xmm9 32#define RA2 %xmm10 33#define RA3 %xmm11 34 35#define RB0 %xmm12 36#define RB1 %xmm13 37#define RB2 %xmm14 38#define RB3 %xmm15 39 40#define RNOT %xmm0 41#define RBSWAP %xmm1 42 43 44/* Transpose four 32-bit words between 128-bit vectors. */ 45#define transpose_4x4(x0, x1, x2, x3, t1, t2) \ 46 vpunpckhdq x1, x0, t2; \ 47 vpunpckldq x1, x0, x0; \ 48 \ 49 vpunpckldq x3, x2, t1; \ 50 vpunpckhdq x3, x2, x2; \ 51 \ 52 vpunpckhqdq t1, x0, x1; \ 53 vpunpcklqdq t1, x0, x0; \ 54 \ 55 vpunpckhqdq x2, t2, x3; \ 56 vpunpcklqdq x2, t2, x2; 57 58/* pre-SubByte transform. */ 59#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \ 60 vpand x, mask4bit, tmp0; \ 61 vpandn x, mask4bit, x; \ 62 vpsrld $4, x, x; \ 63 \ 64 vpshufb tmp0, lo_t, tmp0; \ 65 vpshufb x, hi_t, x; \ 66 vpxor tmp0, x, x; 67 68/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by 69 * 'vaeslastenc' instruction. 70 */ 71#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \ 72 vpandn mask4bit, x, tmp0; \ 73 vpsrld $4, x, x; \ 74 vpand x, mask4bit, x; \ 75 \ 76 vpshufb tmp0, lo_t, tmp0; \ 77 vpshufb x, hi_t, x; \ 78 vpxor tmp0, x, x; 79 80 81.section .rodata.cst164, "aM", @progbits, 164 82.align 16 83 84/* 85 * Following four affine transform look-up tables are from work by 86 * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni 87 * 88 * These allow exposing SM4 S-Box from AES SubByte. 89 */ 90 91/* pre-SubByte affine transform, from SM4 field to AES field. */ 92.Lpre_tf_lo_s: 93 .quad 0x9197E2E474720701, 0xC7C1B4B222245157 94.Lpre_tf_hi_s: 95 .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012 96 97/* post-SubByte affine transform, from AES field to SM4 field. */ 98.Lpost_tf_lo_s: 99 .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82 100.Lpost_tf_hi_s: 101 .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF 102 103/* For isolating SubBytes from AESENCLAST, inverse shift row */ 104.Linv_shift_row: 105 .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b 106 .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 107 108/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */ 109.Linv_shift_row_rol_8: 110 .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e 111 .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06 112 113/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */ 114.Linv_shift_row_rol_16: 115 .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01 116 .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09 117 118/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */ 119.Linv_shift_row_rol_24: 120 .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04 121 .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c 122 123/* For CTR-mode IV byteswap */ 124.Lbswap128_mask: 125 .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 126 127/* For input word byte-swap */ 128.Lbswap32_mask: 129 .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 130 131.align 4 132/* 4-bit mask */ 133.L0f0f0f0f: 134 .long 0x0f0f0f0f 135 136 137.text 138.align 16 139 140/* 141 * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst, 142 * const u8 *src, int nblocks) 143 */ 144.align 8 145SYM_FUNC_START(sm4_aesni_avx_crypt4) 146 /* input: 147 * %rdi: round key array, CTX 148 * %rsi: dst (1..4 blocks) 149 * %rdx: src (1..4 blocks) 150 * %rcx: num blocks (1..4) 151 */ 152 FRAME_BEGIN 153 154 vmovdqu 0*16(%rdx), RA0; 155 vmovdqa RA0, RA1; 156 vmovdqa RA0, RA2; 157 vmovdqa RA0, RA3; 158 cmpq $2, %rcx; 159 jb .Lblk4_load_input_done; 160 vmovdqu 1*16(%rdx), RA1; 161 je .Lblk4_load_input_done; 162 vmovdqu 2*16(%rdx), RA2; 163 cmpq $3, %rcx; 164 je .Lblk4_load_input_done; 165 vmovdqu 3*16(%rdx), RA3; 166 167.Lblk4_load_input_done: 168 169 vmovdqa .Lbswap32_mask rRIP, RTMP2; 170 vpshufb RTMP2, RA0, RA0; 171 vpshufb RTMP2, RA1, RA1; 172 vpshufb RTMP2, RA2, RA2; 173 vpshufb RTMP2, RA3, RA3; 174 175 vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; 176 vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; 177 vmovdqa .Lpre_tf_hi_s rRIP, RB0; 178 vmovdqa .Lpost_tf_lo_s rRIP, RB1; 179 vmovdqa .Lpost_tf_hi_s rRIP, RB2; 180 vmovdqa .Linv_shift_row rRIP, RB3; 181 vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2; 182 vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3; 183 transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); 184 185#define ROUND(round, s0, s1, s2, s3) \ 186 vbroadcastss (4*(round))(%rdi), RX0; \ 187 vpxor s1, RX0, RX0; \ 188 vpxor s2, RX0, RX0; \ 189 vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ 190 \ 191 /* sbox, non-linear part */ \ 192 transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \ 193 vaesenclast MASK_4BIT, RX0, RX0; \ 194 transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \ 195 \ 196 /* linear part */ \ 197 vpshufb RB3, RX0, RTMP0; \ 198 vpxor RTMP0, s0, s0; /* s0 ^ x */ \ 199 vpshufb RTMP2, RX0, RTMP1; \ 200 vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ 201 vpshufb RTMP3, RX0, RTMP1; \ 202 vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ 203 vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \ 204 vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ 205 vpslld $2, RTMP0, RTMP1; \ 206 vpsrld $30, RTMP0, RTMP0; \ 207 vpxor RTMP0, s0, s0; \ 208 /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ 209 vpxor RTMP1, s0, s0; 210 211 leaq (32*4)(%rdi), %rax; 212.align 16 213.Lroundloop_blk4: 214 ROUND(0, RA0, RA1, RA2, RA3); 215 ROUND(1, RA1, RA2, RA3, RA0); 216 ROUND(2, RA2, RA3, RA0, RA1); 217 ROUND(3, RA3, RA0, RA1, RA2); 218 leaq (4*4)(%rdi), %rdi; 219 cmpq %rax, %rdi; 220 jne .Lroundloop_blk4; 221 222#undef ROUND 223 224 vmovdqa .Lbswap128_mask rRIP, RTMP2; 225 226 transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); 227 vpshufb RTMP2, RA0, RA0; 228 vpshufb RTMP2, RA1, RA1; 229 vpshufb RTMP2, RA2, RA2; 230 vpshufb RTMP2, RA3, RA3; 231 232 vmovdqu RA0, 0*16(%rsi); 233 cmpq $2, %rcx; 234 jb .Lblk4_store_output_done; 235 vmovdqu RA1, 1*16(%rsi); 236 je .Lblk4_store_output_done; 237 vmovdqu RA2, 2*16(%rsi); 238 cmpq $3, %rcx; 239 je .Lblk4_store_output_done; 240 vmovdqu RA3, 3*16(%rsi); 241 242.Lblk4_store_output_done: 243 vzeroall; 244 FRAME_END 245 ret; 246SYM_FUNC_END(sm4_aesni_avx_crypt4) 247 248.align 8 249SYM_FUNC_START_LOCAL(__sm4_crypt_blk8) 250 /* input: 251 * %rdi: round key array, CTX 252 * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel 253 * plaintext blocks 254 * output: 255 * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel 256 * ciphertext blocks 257 */ 258 FRAME_BEGIN 259 260 vmovdqa .Lbswap32_mask rRIP, RTMP2; 261 vpshufb RTMP2, RA0, RA0; 262 vpshufb RTMP2, RA1, RA1; 263 vpshufb RTMP2, RA2, RA2; 264 vpshufb RTMP2, RA3, RA3; 265 vpshufb RTMP2, RB0, RB0; 266 vpshufb RTMP2, RB1, RB1; 267 vpshufb RTMP2, RB2, RB2; 268 vpshufb RTMP2, RB3, RB3; 269 270 vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; 271 transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); 272 transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); 273 274#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ 275 vbroadcastss (4*(round))(%rdi), RX0; \ 276 vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \ 277 vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \ 278 vmovdqa RX0, RX1; \ 279 vpxor s1, RX0, RX0; \ 280 vpxor s2, RX0, RX0; \ 281 vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ 282 vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \ 283 vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \ 284 vpxor r1, RX1, RX1; \ 285 vpxor r2, RX1, RX1; \ 286 vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ 287 \ 288 /* sbox, non-linear part */ \ 289 transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ 290 transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ 291 vmovdqa .Linv_shift_row rRIP, RTMP4; \ 292 vaesenclast MASK_4BIT, RX0, RX0; \ 293 vaesenclast MASK_4BIT, RX1, RX1; \ 294 transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ 295 transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ 296 \ 297 /* linear part */ \ 298 vpshufb RTMP4, RX0, RTMP0; \ 299 vpxor RTMP0, s0, s0; /* s0 ^ x */ \ 300 vpshufb RTMP4, RX1, RTMP2; \ 301 vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \ 302 vpxor RTMP2, r0, r0; /* r0 ^ x */ \ 303 vpshufb RTMP4, RX0, RTMP1; \ 304 vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ 305 vpshufb RTMP4, RX1, RTMP3; \ 306 vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \ 307 vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \ 308 vpshufb RTMP4, RX0, RTMP1; \ 309 vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ 310 vpshufb RTMP4, RX1, RTMP3; \ 311 vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \ 312 vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ 313 vpshufb RTMP4, RX0, RTMP1; \ 314 vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ 315 /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ 316 vpslld $2, RTMP0, RTMP1; \ 317 vpsrld $30, RTMP0, RTMP0; \ 318 vpxor RTMP0, s0, s0; \ 319 vpxor RTMP1, s0, s0; \ 320 vpshufb RTMP4, RX1, RTMP3; \ 321 vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ 322 /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ 323 vpslld $2, RTMP2, RTMP3; \ 324 vpsrld $30, RTMP2, RTMP2; \ 325 vpxor RTMP2, r0, r0; \ 326 vpxor RTMP3, r0, r0; 327 328 leaq (32*4)(%rdi), %rax; 329.align 16 330.Lroundloop_blk8: 331 ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); 332 ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); 333 ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); 334 ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); 335 leaq (4*4)(%rdi), %rdi; 336 cmpq %rax, %rdi; 337 jne .Lroundloop_blk8; 338 339#undef ROUND 340 341 vmovdqa .Lbswap128_mask rRIP, RTMP2; 342 343 transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); 344 transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); 345 vpshufb RTMP2, RA0, RA0; 346 vpshufb RTMP2, RA1, RA1; 347 vpshufb RTMP2, RA2, RA2; 348 vpshufb RTMP2, RA3, RA3; 349 vpshufb RTMP2, RB0, RB0; 350 vpshufb RTMP2, RB1, RB1; 351 vpshufb RTMP2, RB2, RB2; 352 vpshufb RTMP2, RB3, RB3; 353 354 FRAME_END 355 ret; 356SYM_FUNC_END(__sm4_crypt_blk8) 357 358/* 359 * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst, 360 * const u8 *src, int nblocks) 361 */ 362.align 8 363SYM_FUNC_START(sm4_aesni_avx_crypt8) 364 /* input: 365 * %rdi: round key array, CTX 366 * %rsi: dst (1..8 blocks) 367 * %rdx: src (1..8 blocks) 368 * %rcx: num blocks (1..8) 369 */ 370 FRAME_BEGIN 371 372 cmpq $5, %rcx; 373 jb sm4_aesni_avx_crypt4; 374 vmovdqu (0 * 16)(%rdx), RA0; 375 vmovdqu (1 * 16)(%rdx), RA1; 376 vmovdqu (2 * 16)(%rdx), RA2; 377 vmovdqu (3 * 16)(%rdx), RA3; 378 vmovdqu (4 * 16)(%rdx), RB0; 379 vmovdqa RB0, RB1; 380 vmovdqa RB0, RB2; 381 vmovdqa RB0, RB3; 382 je .Lblk8_load_input_done; 383 vmovdqu (5 * 16)(%rdx), RB1; 384 cmpq $7, %rcx; 385 jb .Lblk8_load_input_done; 386 vmovdqu (6 * 16)(%rdx), RB2; 387 je .Lblk8_load_input_done; 388 vmovdqu (7 * 16)(%rdx), RB3; 389 390.Lblk8_load_input_done: 391 call __sm4_crypt_blk8; 392 393 cmpq $6, %rcx; 394 vmovdqu RA0, (0 * 16)(%rsi); 395 vmovdqu RA1, (1 * 16)(%rsi); 396 vmovdqu RA2, (2 * 16)(%rsi); 397 vmovdqu RA3, (3 * 16)(%rsi); 398 vmovdqu RB0, (4 * 16)(%rsi); 399 jb .Lblk8_store_output_done; 400 vmovdqu RB1, (5 * 16)(%rsi); 401 je .Lblk8_store_output_done; 402 vmovdqu RB2, (6 * 16)(%rsi); 403 cmpq $7, %rcx; 404 je .Lblk8_store_output_done; 405 vmovdqu RB3, (7 * 16)(%rsi); 406 407.Lblk8_store_output_done: 408 vzeroall; 409 FRAME_END 410 ret; 411SYM_FUNC_END(sm4_aesni_avx_crypt8) 412 413/* 414 * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst, 415 * const u8 *src, u8 *iv) 416 */ 417.align 8 418SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) 419 /* input: 420 * %rdi: round key array, CTX 421 * %rsi: dst (8 blocks) 422 * %rdx: src (8 blocks) 423 * %rcx: iv (big endian, 128bit) 424 */ 425 FRAME_BEGIN 426 427 /* load IV and byteswap */ 428 vmovdqu (%rcx), RA0; 429 430 vmovdqa .Lbswap128_mask rRIP, RBSWAP; 431 vpshufb RBSWAP, RA0, RTMP0; /* be => le */ 432 433 vpcmpeqd RNOT, RNOT, RNOT; 434 vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */ 435 436#define inc_le128(x, minus_one, tmp) \ 437 vpcmpeqq minus_one, x, tmp; \ 438 vpsubq minus_one, x, x; \ 439 vpslldq $8, tmp, tmp; \ 440 vpsubq tmp, x, x; 441 442 /* construct IVs */ 443 inc_le128(RTMP0, RNOT, RTMP2); /* +1 */ 444 vpshufb RBSWAP, RTMP0, RA1; 445 inc_le128(RTMP0, RNOT, RTMP2); /* +2 */ 446 vpshufb RBSWAP, RTMP0, RA2; 447 inc_le128(RTMP0, RNOT, RTMP2); /* +3 */ 448 vpshufb RBSWAP, RTMP0, RA3; 449 inc_le128(RTMP0, RNOT, RTMP2); /* +4 */ 450 vpshufb RBSWAP, RTMP0, RB0; 451 inc_le128(RTMP0, RNOT, RTMP2); /* +5 */ 452 vpshufb RBSWAP, RTMP0, RB1; 453 inc_le128(RTMP0, RNOT, RTMP2); /* +6 */ 454 vpshufb RBSWAP, RTMP0, RB2; 455 inc_le128(RTMP0, RNOT, RTMP2); /* +7 */ 456 vpshufb RBSWAP, RTMP0, RB3; 457 inc_le128(RTMP0, RNOT, RTMP2); /* +8 */ 458 vpshufb RBSWAP, RTMP0, RTMP1; 459 460 /* store new IV */ 461 vmovdqu RTMP1, (%rcx); 462 463 call __sm4_crypt_blk8; 464 465 vpxor (0 * 16)(%rdx), RA0, RA0; 466 vpxor (1 * 16)(%rdx), RA1, RA1; 467 vpxor (2 * 16)(%rdx), RA2, RA2; 468 vpxor (3 * 16)(%rdx), RA3, RA3; 469 vpxor (4 * 16)(%rdx), RB0, RB0; 470 vpxor (5 * 16)(%rdx), RB1, RB1; 471 vpxor (6 * 16)(%rdx), RB2, RB2; 472 vpxor (7 * 16)(%rdx), RB3, RB3; 473 474 vmovdqu RA0, (0 * 16)(%rsi); 475 vmovdqu RA1, (1 * 16)(%rsi); 476 vmovdqu RA2, (2 * 16)(%rsi); 477 vmovdqu RA3, (3 * 16)(%rsi); 478 vmovdqu RB0, (4 * 16)(%rsi); 479 vmovdqu RB1, (5 * 16)(%rsi); 480 vmovdqu RB2, (6 * 16)(%rsi); 481 vmovdqu RB3, (7 * 16)(%rsi); 482 483 vzeroall; 484 FRAME_END 485 ret; 486SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8) 487 488/* 489 * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst, 490 * const u8 *src, u8 *iv) 491 */ 492.align 8 493SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) 494 /* input: 495 * %rdi: round key array, CTX 496 * %rsi: dst (8 blocks) 497 * %rdx: src (8 blocks) 498 * %rcx: iv 499 */ 500 FRAME_BEGIN 501 502 vmovdqu (0 * 16)(%rdx), RA0; 503 vmovdqu (1 * 16)(%rdx), RA1; 504 vmovdqu (2 * 16)(%rdx), RA2; 505 vmovdqu (3 * 16)(%rdx), RA3; 506 vmovdqu (4 * 16)(%rdx), RB0; 507 vmovdqu (5 * 16)(%rdx), RB1; 508 vmovdqu (6 * 16)(%rdx), RB2; 509 vmovdqu (7 * 16)(%rdx), RB3; 510 511 call __sm4_crypt_blk8; 512 513 vmovdqu (7 * 16)(%rdx), RNOT; 514 vpxor (%rcx), RA0, RA0; 515 vpxor (0 * 16)(%rdx), RA1, RA1; 516 vpxor (1 * 16)(%rdx), RA2, RA2; 517 vpxor (2 * 16)(%rdx), RA3, RA3; 518 vpxor (3 * 16)(%rdx), RB0, RB0; 519 vpxor (4 * 16)(%rdx), RB1, RB1; 520 vpxor (5 * 16)(%rdx), RB2, RB2; 521 vpxor (6 * 16)(%rdx), RB3, RB3; 522 vmovdqu RNOT, (%rcx); /* store new IV */ 523 524 vmovdqu RA0, (0 * 16)(%rsi); 525 vmovdqu RA1, (1 * 16)(%rsi); 526 vmovdqu RA2, (2 * 16)(%rsi); 527 vmovdqu RA3, (3 * 16)(%rsi); 528 vmovdqu RB0, (4 * 16)(%rsi); 529 vmovdqu RB1, (5 * 16)(%rsi); 530 vmovdqu RB2, (6 * 16)(%rsi); 531 vmovdqu RB3, (7 * 16)(%rsi); 532 533 vzeroall; 534 FRAME_END 535 ret; 536SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) 537 538/* 539 * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst, 540 * const u8 *src, u8 *iv) 541 */ 542.align 8 543SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) 544 /* input: 545 * %rdi: round key array, CTX 546 * %rsi: dst (8 blocks) 547 * %rdx: src (8 blocks) 548 * %rcx: iv 549 */ 550 FRAME_BEGIN 551 552 /* Load input */ 553 vmovdqu (%rcx), RA0; 554 vmovdqu 0 * 16(%rdx), RA1; 555 vmovdqu 1 * 16(%rdx), RA2; 556 vmovdqu 2 * 16(%rdx), RA3; 557 vmovdqu 3 * 16(%rdx), RB0; 558 vmovdqu 4 * 16(%rdx), RB1; 559 vmovdqu 5 * 16(%rdx), RB2; 560 vmovdqu 6 * 16(%rdx), RB3; 561 562 /* Update IV */ 563 vmovdqu 7 * 16(%rdx), RNOT; 564 vmovdqu RNOT, (%rcx); 565 566 call __sm4_crypt_blk8; 567 568 vpxor (0 * 16)(%rdx), RA0, RA0; 569 vpxor (1 * 16)(%rdx), RA1, RA1; 570 vpxor (2 * 16)(%rdx), RA2, RA2; 571 vpxor (3 * 16)(%rdx), RA3, RA3; 572 vpxor (4 * 16)(%rdx), RB0, RB0; 573 vpxor (5 * 16)(%rdx), RB1, RB1; 574 vpxor (6 * 16)(%rdx), RB2, RB2; 575 vpxor (7 * 16)(%rdx), RB3, RB3; 576 577 vmovdqu RA0, (0 * 16)(%rsi); 578 vmovdqu RA1, (1 * 16)(%rsi); 579 vmovdqu RA2, (2 * 16)(%rsi); 580 vmovdqu RA3, (3 * 16)(%rsi); 581 vmovdqu RB0, (4 * 16)(%rsi); 582 vmovdqu RB1, (5 * 16)(%rsi); 583 vmovdqu RB2, (6 * 16)(%rsi); 584 vmovdqu RB3, (7 * 16)(%rsi); 585 586 vzeroall; 587 FRAME_END 588 ret; 589SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8) 590