// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>

#define rRIP		(%rip)

/* vector registers */
#define RX0		%ymm0
#define RX1		%ymm1
#define MASK_4BIT	%ymm2
#define RTMP0		%ymm3
#define RTMP1		%ymm4
#define RTMP2		%ymm5
#define RTMP3		%ymm6
#define RTMP4		%ymm7

#define RA0		%ymm8
#define RA1		%ymm9
#define RA2		%ymm10
#define RA3		%ymm11

#define RB0		%ymm12
#define RB1		%ymm13
#define RB2		%ymm14
#define RB3		%ymm15

#define RNOT		%ymm0
#define RBSWAP		%ymm1

#define RX0x		%xmm0
#define RX1x		%xmm1
#define MASK_4BITx	%xmm2

#define RNOTx		%xmm0
#define RBSWAPx		%xmm1

#define RTMP0x		%xmm3
#define RTMP1x		%xmm4
#define RTMP2x		%xmm5
#define RTMP3x		%xmm6
#define RTMP4x		%xmm7


/* helper macros */

/* Transpose four 32-bit words between 128-bit vector lanes. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2)	\
	vpunpckhdq x1, x0, t2;			\
	vpunpckldq x1, x0, x0;			\
						\
	vpunpckldq x3, x2, t1;			\
	vpunpckhdq x3, x2, x2;			\
						\
	vpunpckhqdq t1, x0, x1;			\
	vpunpcklqdq t1, x0, x0;			\
						\
	vpunpckhqdq x2, t2, x3;			\
	vpunpcklqdq x2, t2, x2;
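
/*
 * Not part of the build: a minimal C sketch of what transpose_4x4
 * computes. Within each 128-bit lane, word j of input vector i becomes
 * word i of output vector j; the two lanes are transposed independently.
 *
 *	// in[i][j] / out[i][j]: 32-bit word j of vector i (one lane)
 *	for (int i = 0; i < 4; i++)
 *		for (int j = 0; j < 4; j++)
 *			out[j][i] = in[i][j];
 */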
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0)	\
	vpand x, mask4bit, tmp0;			\
	vpandn x, mask4bit, x;				\
	vpsrld $4, x, x;				\
							\
	vpshufb tmp0, lo_t, tmp0;			\
	vpshufb x, hi_t, x;				\
	vpxor tmp0, x, x;

/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0)	\
	vpandn mask4bit, x, tmp0;			\
	vpsrld $4, x, x;				\
	vpand x, mask4bit, x;				\
							\
	vpshufb tmp0, lo_t, tmp0;			\
	vpshufb x, hi_t, x;				\
	vpxor tmp0, x, x;
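
/*
 * Not part of the build: both transforms above implement a byte-wise
 * affine map using two 16-entry vpshufb lookups. A C sketch of the
 * per-byte operation (lo_t/hi_t hold the corresponding table bytes):
 *
 *	uint8_t affine_byte(uint8_t x, const uint8_t lo_t[16],
 *			    const uint8_t hi_t[16])
 *	{
 *		return lo_t[x & 0x0f] ^ hi_t[x >> 4];
 *	}
 *
 * The split works because the map is linear over GF(2): the low- and
 * high-nibble contributions can be looked up separately and XOR'ed,
 * with the affine constant folded into the tables.
 */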
.section .rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * The following four affine transform look-up tables are from the work
 * of Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * They allow the SM4 S-Box to be computed with the AES SubBytes
 * operation.
 */

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
.Lpadding_deadbeef:
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
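
/*
 * Not part of the build: how the tables above are used (following the
 * sm4ni construction referenced in the header):
 *
 *	sm4_sbox(x) == post_affine(aes_sbox(pre_affine(x)))
 *
 * vaesenclast is used purely for its SubBytes step: the ShiftRows it
 * also performs is undone afterwards with the .Linv_shift_row* shuffle
 * masks, and its round-key XOR (with MASK_4BITx) is compensated for in
 * transform_post.
 */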
.text
SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN

	vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)	\
	vpbroadcastd (4*(round))(%rdi), RX0;		\
	vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;	\
	vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;	\
	vmovdqa RX0, RX1;				\
	vpxor s1, RX0, RX0;				\
	vpxor s2, RX0, RX0;				\
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */	\
	vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;	\
	vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;	\
	vpxor r1, RX1, RX1;				\
	vpxor r2, RX1, RX1;				\
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */	\
							\
	/* sbox, non-linear part */			\
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	vextracti128 $1, RX0, RTMP4x;			\
	vextracti128 $1, RX1, RTMP0x;			\
	vaesenclast MASK_4BITx, RX0x, RX0x;		\
	vaesenclast MASK_4BITx, RTMP4x, RTMP4x;		\
	vaesenclast MASK_4BITx, RX1x, RX1x;		\
	vaesenclast MASK_4BITx, RTMP0x, RTMP0x;		\
	vinserti128 $1, RTMP4x, RX0, RX0;		\
	vbroadcasti128 .Linv_shift_row rRIP, RTMP4;	\
	vinserti128 $1, RTMP0x, RX1, RX1;		\
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
							\
	/* linear part */				\
	vpshufb RTMP4, RX0, RTMP0;			\
	vpxor RTMP0, s0, s0; /* s0 ^ x */		\
	vpshufb RTMP4, RX1, RTMP2;			\
	vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4; \
	vpxor RTMP2, r0, r0; /* r0 ^ x */		\
	vpshufb RTMP4, RX0, RTMP1;			\
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */	\
	vpshufb RTMP4, RX1, RTMP3;			\
	vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */	\
	vpshufb RTMP4, RX0, RTMP1;			\
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX1, RTMP3;			\
	vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX0, RTMP1;			\
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */	\
	vpslld $2, RTMP0, RTMP1;			\
	vpsrld $30, RTMP0, RTMP0;			\
	vpxor RTMP0, s0, s0;				\
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;				\
	vpshufb RTMP4, RX1, RTMP3;			\
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */	\
	vpslld $2, RTMP2, RTMP3;			\
	vpsrld $30, RTMP2, RTMP2;			\
	vpxor RTMP2, r0, r0;				\
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP3, r0, r0;
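
/*
 * Not part of the build: one SM4 round in scalar C, matching the
 * comments in ROUND above (rol32() is a hypothetical 32-bit
 * rotate-left helper, sm4_sbox_word() applies the S-box to each byte):
 *
 *	uint32_t t = s1 ^ s2 ^ s3 ^ rk;		// round-key mix
 *	uint32_t x = sm4_sbox_word(t);		// non-linear part
 *	s0 ^= x ^ rol32(x, 2) ^ rol32(x, 10) ^
 *	      rol32(x, 18) ^ rol32(x, 24);	// linear transform L
 *
 * The macro computes the three middle rotations as
 * rol32(x ^ rol32(x, 8) ^ rol32(x, 16), 2) and merges the byte-aligned
 * rotations into the inverse-ShiftRows vpshufb masks.
 */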
	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk16;

#undef ROUND

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk16)

#define inc_le128(x, minus_one, tmp)	\
	vpcmpeqq minus_one, x, tmp;	\
	vpsubq minus_one, x, x;		\
	vpslldq $8, tmp, tmp;		\
	vpsubq tmp, x, x;
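
/*
 * Not part of the build: inc_le128 adds 1 to a 128-bit little-endian
 * counter held as two 64-bit halves per lane, i.e.:
 *
 *	lo += 1;
 *	if (lo == 0)		// low qword wrapped around
 *		hi += 1;	// propagate the carry
 *
 * vpcmpeqq sets the low qword of tmp to all-ones exactly when the low
 * qword of x was ~0 before the increment; vpslldq moves that -1 into
 * the high qword position, and the final vpsubq turns it into the +1
 * carry.
 */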

/*
 * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv (big endian, 128 bit)
	 */
	FRAME_BEGIN

	movq 8(%rcx), %rax;
	bswapq %rax;

	vzeroupper;

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
	vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */

	/* load IV and byteswap */
	vmovdqu (%rcx), RTMP4x;
	vpshufb RTMP3x, RTMP4x, RTMP4x;
	vmovdqa RTMP4x, RTMP0x;
	inc_le128(RTMP4x, RNOTx, RTMP1x);
	vinserti128 $1, RTMP4x, RTMP0, RTMP0;
	vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */

	/* check whether the 64-bit overflow/carry path is needed */
	cmpq $(0xffffffffffffffff - 16), %rax;
	ja .Lhandle_ctr_carry;

	/* construct IVs */
	vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
	vpshufb RTMP3, RTMP0, RA1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
	vpshufb RTMP3, RTMP0, RA2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
	vpshufb RTMP3, RTMP0, RA3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
	vpshufb RTMP3, RTMP0, RB0;
	vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
	vpshufb RTMP3, RTMP0, RB1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
	vpshufb RTMP3, RTMP0, RB2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
	vpshufb RTMP3, RTMP0, RB3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
	vpshufb RTMP3x, RTMP0x, RTMP0x;

	jmp .Lctr_carry_done;
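
/*
 * Not part of the build: the carry check above in C terms, where
 * ctr_lo64 is the bswapped low 64 bits of the big-endian IV (the
 * value held in %rax):
 *
 *	if (ctr_lo64 <= ~0ULL - 16)
 *		fast_path();	// +2 per lane with one vpsubq each,
 *				// no 64-bit carry can occur
 *	else
 *		carry_path();	// below: one inc_le128 per counter,
 *				// with full 128-bit carry propagation
 */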

.Lhandle_ctr_carry:
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
	inc_le128(RTMP0, RNOT, RTMP1);
	vextracti128 $1, RTMP0, RTMP0x;
	vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */

.align 4
.Lctr_carry_done:
	/* store new IV */
	vmovdqu RTMP0x, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)

/*
 * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	vmovdqu (0 * 32)(%rdx), RA0;
	vmovdqu (1 * 32)(%rdx), RA1;
	vmovdqu (2 * 32)(%rdx), RA2;
	vmovdqu (3 * 32)(%rdx), RA3;
	vmovdqu (4 * 32)(%rdx), RB0;
	vmovdqu (5 * 32)(%rdx), RB1;
	vmovdqu (6 * 32)(%rdx), RB2;
	vmovdqu (7 * 32)(%rdx), RB3;

	call __sm4_crypt_blk16;

	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RNOT;
	vpxor RNOT, RA0, RA0;
	vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
	vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
	vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
	vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
	vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
	vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
	vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
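
/*
 * Not part of the build: CFB decryption in C terms. Each plaintext
 * block is the block-cipher encryption of the previous ciphertext
 * block (the IV for block 0), XOR'ed with the current ciphertext:
 *
 *	P[i] = sm4_encrypt(i == 0 ? iv : C[i - 1]) ^ C[i];
 *
 * All sixteen cipher inputs are known up front, which is what makes
 * the parallel 16-block implementation below possible.
 */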

/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	/* Load input */
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	/* Update IV */
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)