// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>

#define rRIP         (%rip)

/* vector registers */
#define RX0          %ymm0
#define RX1          %ymm1
#define MASK_4BIT    %ymm2
#define RTMP0        %ymm3
#define RTMP1        %ymm4
#define RTMP2        %ymm5
#define RTMP3        %ymm6
#define RTMP4        %ymm7

#define RA0          %ymm8
#define RA1          %ymm9
#define RA2          %ymm10
#define RA3          %ymm11

#define RB0          %ymm12
#define RB1          %ymm13
#define RB2          %ymm14
#define RB3          %ymm15

#define RNOT         %ymm0
#define RBSWAP       %ymm1

#define RX0x         %xmm0
#define RX1x         %xmm1
#define MASK_4BITx   %xmm2

#define RNOTx        %xmm0
#define RBSWAPx      %xmm1

#define RTMP0x       %xmm3
#define RTMP1x       %xmm4
#define RTMP2x       %xmm5
#define RTMP3x       %xmm6
#define RTMP4x       %xmm7


/* helper macros */

/* Transpose four 32-bit words between 128-bit vector lanes. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2;                \
	vpunpckldq x1, x0, x0;                \
	                                      \
	vpunpckldq x3, x2, t1;                \
	vpunpckhdq x3, x2, x2;                \
	                                      \
	vpunpckhqdq t1, x0, x1;               \
	vpunpcklqdq t1, x0, x0;               \
	                                      \
	vpunpckhqdq x2, t2, x3;               \
	vpunpcklqdq x2, t2, x2;

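/*
 * Per 128-bit lane the four registers form a 4x4 matrix of 32-bit
 * words that ends up transposed:
 *
 *   x0 = { a0 a1 a2 a3 }          x0 = { a0 b0 c0 d0 }
 *   x1 = { b0 b1 b2 b3 }   =>     x1 = { a1 b1 c1 d1 }
 *   x2 = { c0 c1 c2 c3 }          x2 = { a2 b2 c2 d2 }
 *   x3 = { d0 d1 d2 d3 }          x3 = { a3 b3 c3 d3 }
 */
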
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0;                     \
	vpandn x, mask4bit, x;                       \
	vpsrld $4, x, x;                             \
	                                             \
	vpshufb tmp0, lo_t, tmp0;                    \
	vpshufb x, hi_t, x;                          \
	vpxor tmp0, x, x;

/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0;                     \
	vpsrld $4, x, x;                              \
	vpand x, mask4bit, x;                         \
	                                              \
	vpshufb tmp0, lo_t, tmp0;                     \
	vpshufb x, hi_t, x;                           \
	vpxor tmp0, x, x;
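
/*
 * Both transforms evaluate an affine byte function A(x) with two
 * 16-entry vpshufb look-ups by splitting x into 4-bit halves:
 *
 *   A(x) = lo_t[x & 0x0f] ^ hi_t[x >> 4]
 *
 * This works because A(u ^ v) = A(u) ^ A(v) ^ c for an affine A with
 * constant c, and the constant is folded into one of the tables.
 */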

.section	.rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * The following four affine transform look-up tables are from the work
 * of Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * They allow computing the SM4 S-Box with the AES SubBytes operation.
 */

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse ShiftRows */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
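
/*
 * vaesenclast computes ShiftRows(SubBytes(state)) ^ key.  Shuffling
 * its result with the inverse ShiftRows permutation above cancels the
 * built-in ShiftRows and leaves pure SubBytes; the key operand merely
 * XORs in a constant, which transform_post accounts for.
 */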

/* Inverse ShiftRows + rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse ShiftRows + rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse ShiftRows + rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
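
/*
 * Each rol table above is the inverse ShiftRows permutation composed
 * with a per-32-bit-word byte rotation, e.g. for rol 8:
 *
 *   .Linv_shift_row_rol_8[i] = .Linv_shift_row[rol8[i]]
 *   rol8 = { 3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14 }
 *
 * so a single vpshufb applies both permutations at once.
 */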

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
.Lpadding_deadbeef:
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef

.text
.align 16

.align 8
SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN

	vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
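
	/*
	 * The blocks are now word-sliced: each ymm register holds the
	 * same 32-bit word position from eight blocks (four per 128-bit
	 * lane), RA* covering blocks 0-7 and RB* covering blocks 8-15,
	 * so each round below processes all sixteen blocks in parallel.
	 */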

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
	vpbroadcastd (4*(round))(%rdi), RX0;                        \
	vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
	vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
	vmovdqa RX0, RX1;                                           \
	vpxor s1, RX0, RX0;                                         \
	vpxor s2, RX0, RX0;                                         \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
	vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
	vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
	vpxor r1, RX1, RX1;                                         \
	vpxor r2, RX1, RX1;                                         \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
	                                                            \
	/* sbox, non-linear part */                                 \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	vextracti128 $1, RX0, RTMP4x;                               \
	vextracti128 $1, RX1, RTMP0x;                               \
	vaesenclast MASK_4BITx, RX0x, RX0x;                         \
	vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
	vaesenclast MASK_4BITx, RX1x, RX1x;                         \
	vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
	vinserti128 $1, RTMP4x, RX0, RX0;                           \
	vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
	vinserti128 $1, RTMP0x, RX1, RX1;                           \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	                                                            \
	/* linear part */                                           \
	vpshufb RTMP4, RX0, RTMP0;                                  \
	vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
	vpshufb RTMP4, RX1, RTMP2;                                  \
	vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
	vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP0, RTMP1;                                    \
	vpsrld $30, RTMP0, RTMP0;                                   \
	vpxor RTMP0, s0, s0;                                        \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;                                        \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP2, RTMP3;                                    \
	vpsrld $30, RTMP2, RTMP2;                                   \
	vpxor RTMP2, r0, r0;                                        \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP3, r0, r0;

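	/*
	 * Each ROUND invocation computes one SM4 round on both register
	 * sets:
	 *
	 *   s0 = s0 ^ T(s1 ^ s2 ^ s3 ^ rk)
	 *
	 * with T(x) = L(tau(x)), where tau applies the S-box per byte and
	 * L(x) = x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24).
	 * Four invocations rotate through the state words, so the eight
	 * loop iterations below cover all 32 rounds; %rax marks the end
	 * of the 32-entry round key array.
	 */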
	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk16;

#undef ROUND

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk16)

#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp;  \
	vpsubq minus_one, x, x;      \
	vpslldq $8, tmp, tmp;        \
	vpsubq tmp, x, x;
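
/*
 * inc_le128 adds 1 to the 128-bit little-endian value in each lane.
 * minus_one holds { high: 0, low: -1 }, so vpsubq adds 1 to the low
 * qword; vpcmpeqq flags lanes whose low qword was all-ones before the
 * increment, and that flag, shifted up into the high qword, subtracts
 * -1 there to propagate the carry.
 */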

/*
 * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
	FRAME_BEGIN

	movq 8(%rcx), %rax;
	bswapq %rax;

	vzeroupper;

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
	vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
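
	/*
	 * RNOT now holds -1 and RTMP2 holds -2 in the low qword of each
	 * lane (high qwords zero), so a single vpsubq by RTMP2 advances
	 * the little-endian counter in both lanes by 2 whenever the low
	 * qword cannot wrap.
	 */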

	/* load IV and byteswap */
	vmovdqu (%rcx), RTMP4x;
	vpshufb RTMP3x, RTMP4x, RTMP4x;
	vmovdqa RTMP4x, RTMP0x;
	inc_le128(RTMP4x, RNOTx, RTMP1x);
	vinserti128 $1, RTMP4x, RTMP0, RTMP0;
	vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */

	/* check whether the low 64-bit counter overflows while adding 16 */
	cmpq $(0xffffffffffffffff - 16), %rax;
	ja .Lhandle_ctr_carry;
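
	/*
	 * %rax is the low 64 bits of the byte-swapped counter; the fast
	 * path below adds 2 per qword without carry propagation, so it
	 * is only taken when all 16 increments stay within the low qword.
	 */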

	/* construct IVs */
	vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
	vpshufb RTMP3, RTMP0, RA1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
	vpshufb RTMP3, RTMP0, RA2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
	vpshufb RTMP3, RTMP0, RA3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
	vpshufb RTMP3, RTMP0, RB0;
	vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
	vpshufb RTMP3, RTMP0, RB1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
	vpshufb RTMP3, RTMP0, RB2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
	vpshufb RTMP3, RTMP0, RB3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
	vpshufb RTMP3x, RTMP0x, RTMP0x;

	jmp .Lctr_carry_done;

.Lhandle_ctr_carry:
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
	inc_le128(RTMP0, RNOT, RTMP1);
	vextracti128 $1, RTMP0, RTMP0x;
	vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */

.align 4
.Lctr_carry_done:
	/* store new IV */
	vmovdqu RTMP0x, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)

/*
 * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	vmovdqu (0 * 32)(%rdx), RA0;
	vmovdqu (1 * 32)(%rdx), RA1;
	vmovdqu (2 * 32)(%rdx), RA2;
	vmovdqu (3 * 32)(%rdx), RA3;
	vmovdqu (4 * 32)(%rdx), RB0;
	vmovdqu (5 * 32)(%rdx), RB1;
	vmovdqu (6 * 32)(%rdx), RB2;
	vmovdqu (7 * 32)(%rdx), RB3;

	call __sm4_crypt_blk16;

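	/*
	 * CBC: P[i] = D(C[i]) ^ C[i-1] with C[-1] = IV.  The first ymm
	 * XOR operand is IV:C0, built in RNOT; the remaining blocks XOR
	 * against the ciphertext shifted back by one block (16 bytes).
	 */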
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RNOT;
	vpxor RNOT, RA0, RA0;
	vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
	vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
	vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
	vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
	vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
	vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
	vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)

/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

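	/*
	 * CFB: P[i] = E(C[i-1]) ^ C[i] with C[-1] = IV, so the blocks fed
	 * to the cipher are IV:C0..C14 and the XOR below uses C0..C15.
	 */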
	/* Load input */
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	/* Update IV */
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
503