// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <asm/frame.h>

#define rRIP         (%rip)

/* vector registers */
#define RX0          %ymm0
#define RX1          %ymm1
#define MASK_4BIT    %ymm2
#define RTMP0        %ymm3
#define RTMP1        %ymm4
#define RTMP2        %ymm5
#define RTMP3        %ymm6
#define RTMP4        %ymm7

#define RA0          %ymm8
#define RA1          %ymm9
#define RA2          %ymm10
#define RA3          %ymm11

#define RB0          %ymm12
#define RB1          %ymm13
#define RB2          %ymm14
#define RB3          %ymm15

#define RNOT         %ymm0
#define RBSWAP       %ymm1

#define RX0x         %xmm0
#define RX1x         %xmm1
#define MASK_4BITx   %xmm2

#define RNOTx        %xmm0
#define RBSWAPx      %xmm1

#define RTMP0x       %xmm3
#define RTMP1x       %xmm4
#define RTMP2x       %xmm5
#define RTMP3x       %xmm6
#define RTMP4x       %xmm7


/* helper macros */

/* 4x4 32-bit integer matrix transpose, within each 128-bit lane */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2;                \
	vpunpckldq x1, x0, x0;                \
	                                      \
	vpunpckldq x3, x2, t1;                \
	vpunpckhdq x3, x2, x2;                \
	                                      \
	vpunpckhqdq t1, x0, x1;               \
	vpunpcklqdq t1, x0, x0;               \
	                                      \
	vpunpckhqdq x2, t2, x3;               \
	vpunpcklqdq x2, t2, x2;
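
/*
 * A reference sketch in C (illustration only, not part of the build) of
 * what transpose_4x4 computes per 128-bit lane: four rows of four
 * 32-bit words go in, the four columns come out.
 *
 *	void transpose_4x4_ref(unsigned int m[4][4])
 *	{
 *		unsigned int t;
 *		int i, j;
 *
 *		for (i = 0; i < 4; i++)
 *			for (j = i + 1; j < 4; j++) {
 *				t = m[i][j];
 *				m[i][j] = m[j][i];
 *				m[j][i] = t;
 *			}
 *	}
 */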

/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0;                     \
	vpandn x, mask4bit, x;                       \
	vpsrld $4, x, x;                             \
	                                             \
	vpshufb tmp0, lo_t, tmp0;                    \
	vpshufb x, hi_t, x;                          \
	vpxor tmp0, x, x;

/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0;                     \
	vpsrld $4, x, x;                              \
	vpand x, mask4bit, x;                         \
	                                              \
	vpshufb tmp0, lo_t, tmp0;                     \
	vpshufb x, hi_t, x;                           \
	vpxor tmp0, x, x;
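
/*
 * Both transforms implement an 8-bit affine map as two 4-bit table
 * lookups: the low nibble indexes one 16-entry table, the high nibble
 * another, and the two results are XORed (valid because the map is
 * linear over GF(2), with the additive constant folded into one table).
 * A C sketch, illustration only:
 *
 *	unsigned char affine_ref(unsigned char b,
 *				 const unsigned char lo_tbl[16],
 *				 const unsigned char hi_tbl[16])
 *	{
 *		return lo_tbl[b & 0x0f] ^ hi_tbl[b >> 4];
 *	}
 *
 * vpshufb performs 16 (or 32) such lookups at once, which is why the
 * input is first masked down to 4 bits per lookup index.
 */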


.section	.rodata.cst164, "aM", @progbits, 164
.align 16

/*
 * The following four affine transform look-up tables are from the work
 * of Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * They allow the SM4 S-Box to be computed via the AES SubBytes
 * operation.
 */

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
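
/*
 * Each mask above composes two byte permutations: the AES inverse
 * ShiftRows (undoing the ShiftRows that AESENCLAST applies, leaving
 * only SubBytes) and a rotate of each 32-bit word by a multiple of 8
 * bits. A C sketch of the vpshufb behavior relied on here, illustration
 * only (these masks never set bit 7, so the zeroing feature of vpshufb
 * is ignored):
 *
 *	void pshufb_ref(unsigned char dst[16], const unsigned char src[16],
 *			const unsigned char mask[16])
 *	{
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			dst[i] = src[mask[i] & 0x0f];
 *	}
 */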

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

.text
.align 16

.align 8
SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN

	vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
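
	/*
	 * After the byte-swap and transpose, each register holds one
	 * 32-bit state word taken from eight different blocks (RA0-RA3
	 * for blocks 0-7, RB0-RB3 for blocks 8-15), so each ROUND below
	 * advances all sixteen blocks in parallel.
	 */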

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
	vpbroadcastd (4*(round))(%rdi), RX0;                        \
	vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
	vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
	vmovdqa RX0, RX1;                                           \
	vpxor s1, RX0, RX0;                                         \
	vpxor s2, RX0, RX0;                                         \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
	vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
	vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
	vpxor r1, RX1, RX1;                                         \
	vpxor r2, RX1, RX1;                                         \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
	                                                            \
	/* sbox, non-linear part */                                 \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	vextracti128 $1, RX0, RTMP4x;                               \
	vextracti128 $1, RX1, RTMP0x;                               \
	vaesenclast MASK_4BITx, RX0x, RX0x;                         \
	vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
	vaesenclast MASK_4BITx, RX1x, RX1x;                         \
	vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
	vinserti128 $1, RTMP4x, RX0, RX0;                           \
	vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
	vinserti128 $1, RTMP0x, RX1, RX1;                           \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	                                                            \
	/* linear part */                                           \
	vpshufb RTMP4, RX0, RTMP0;                                  \
	vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
	vpshufb RTMP4, RX1, RTMP2;                                  \
	vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
	vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP0, RTMP1;                                    \
	vpsrld $30, RTMP0, RTMP0;                                   \
	vpxor RTMP0, s0, s0;                                        \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;                                        \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP2, RTMP3;                                    \
	vpsrld $30, RTMP2, RTMP2;                                   \
	vpxor RTMP2, r0, r0;                                        \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP3, r0, r0;
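
/*
 * For reference, one SM4 round on a single block in C (illustration
 * only; sbox[] is the standard SM4 S-Box, not defined in this file):
 *
 *	static u32 rol32_ref(u32 x, int n)
 *	{
 *		return (x << n) | (x >> (32 - n));
 *	}
 *
 *	static u32 sm4_round_ref(const u32 s[4], u32 rk)
 *	{
 *		u32 x = s[1] ^ s[2] ^ s[3] ^ rk;
 *		u32 t;
 *
 *		x = sbox[x >> 24] << 24 | sbox[(x >> 16) & 0xff] << 16 |
 *		    sbox[(x >> 8) & 0xff] << 8 | sbox[x & 0xff];
 *		t = x ^ rol32_ref(x, 2) ^ rol32_ref(x, 10) ^
 *		    rol32_ref(x, 18) ^ rol32_ref(x, 24);
 *		return s[0] ^ t;
 *	}
 *
 * ROUND computes exactly this, with the S-Box realized through
 * AESENCLAST plus the affine transforms, for sixteen blocks at a time.
 */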

	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk16;

#undef ROUND

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	ret;
SYM_FUNC_END(__sm4_crypt_blk16)

#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp;  \
	vpsubq minus_one, x, x;      \
	vpslldq $8, tmp, tmp;        \
	vpsubq tmp, x, x;
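
/*
 * inc_le128 is a 128-bit little-endian increment done without a 128-bit
 * add instruction. In C terms (illustration only):
 *
 *	void inc_le128_ref(unsigned long long x[2])
 *	{
 *		if (++x[0] == 0)	// low qword wrapped
 *			x[1]++;		// propagate the carry
 *	}
 *
 * vpcmpeqq produces an all-ones mask in the low qword when it is about
 * to wrap, vpsubq of minus_one (-1 in the low qword, 0 in the high)
 * adds 1 to the low qword, and subtracting the mask shifted up into the
 * high qword adds the carry.
 */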

/*
 * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
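	/*
	 * CTR mode reference in C (illustration only; sm4_encrypt_ref()
	 * and be128_inc_ref() are assumed helpers, not kernel APIs):
	 *
	 *	for (i = 0; i < 16; i++) {
	 *		sm4_encrypt_ref(rk, keystream, iv);
	 *		for (j = 0; j < 16; j++)
	 *			dst[i * 16 + j] = src[i * 16 + j] ^ keystream[j];
	 *		be128_inc_ref(iv);	// big-endian 128-bit +1
	 *	}
	 *
	 * The code below builds all sixteen counter values up front,
	 * encrypts them with one __sm4_crypt_blk16 call, and XORs the
	 * result with src.
	 */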
	FRAME_BEGIN

	movq 8(%rcx), %rax;
	bswapq %rax;

	vzeroupper;

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
	vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */

	/* load IV and byteswap */
	vmovdqu (%rcx), RTMP4x;
	vpshufb RTMP3x, RTMP4x, RTMP4x;
	vmovdqa RTMP4x, RTMP0x;
	inc_le128(RTMP4x, RNOTx, RTMP1x);
	vinserti128 $1, RTMP4x, RTMP0, RTMP0;
	vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */

	/* check need for handling 64-bit overflow and carry */
	cmpq $(0xffffffffffffffff - 16), %rax;
	ja .Lhandle_ctr_carry;
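
	/*
	 * %rax holds the low 64 counter bits in native byte order. The
	 * fast path below increments with 64-bit vpsubq only, which is
	 * valid only if none of the sixteen increments can wrap those
	 * bits. In C terms (illustration only):
	 *
	 *	if (ctr_lo > 0xffffffffffffffffULL - 16)
	 *		goto handle_ctr_carry;
	 */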

	/* construct IVs */
	vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
	vpshufb RTMP3, RTMP0, RA1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
	vpshufb RTMP3, RTMP0, RA2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
	vpshufb RTMP3, RTMP0, RA3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
	vpshufb RTMP3, RTMP0, RB0;
	vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
	vpshufb RTMP3, RTMP0, RB1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
	vpshufb RTMP3, RTMP0, RB2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
	vpshufb RTMP3, RTMP0, RB3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
	vpshufb RTMP3x, RTMP0x, RTMP0x;

	jmp .Lctr_carry_done;

.Lhandle_ctr_carry:
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
	inc_le128(RTMP0, RNOT, RTMP1);
	vextracti128 $1, RTMP0, RTMP0x;
	vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
.align 4
.Lctr_carry_done:
	/* store new IV */
	vmovdqu RTMP0x, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	ret;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)

/*
 * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
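	/*
	 * CBC decryption reference in C (illustration only;
	 * sm4_decrypt_ref() is an assumed single-block helper): each
	 * plaintext block is the decryption of the current ciphertext
	 * block XORed with the previous one, the first using the IV.
	 *
	 *	for (i = 0; i < 16; i++) {
	 *		sm4_decrypt_ref(rk, tmp, src + i * 16);
	 *		for (j = 0; j < 16; j++)
	 *			dst[i * 16 + j] = tmp[j] ^
	 *				(i ? src[(i - 1) * 16 + j] : iv[j]);
	 *	}
	 *	memcpy(iv, src + 15 * 16, 16);	// last ciphertext -> new IV
	 *
	 * Note: rk must point at the decryption round keys; the cipher
	 * core below is direction-agnostic.
	 */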
	FRAME_BEGIN

	vzeroupper;

	vmovdqu (0 * 32)(%rdx), RA0;
	vmovdqu (1 * 32)(%rdx), RA1;
	vmovdqu (2 * 32)(%rdx), RA2;
	vmovdqu (3 * 32)(%rdx), RA3;
	vmovdqu (4 * 32)(%rdx), RB0;
	vmovdqu (5 * 32)(%rdx), RB1;
	vmovdqu (6 * 32)(%rdx), RB2;
	vmovdqu (7 * 32)(%rdx), RB3;

	call __sm4_crypt_blk16;

	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RNOT;
	vpxor RNOT, RA0, RA0;
	vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
	vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
	vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
	vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
	vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
	vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
	vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	ret;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)

/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
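	/*
	 * CFB decryption reference in C (illustration only;
	 * sm4_encrypt_ref() is an assumed single-block helper): each
	 * plaintext block is the encryption of the previous ciphertext
	 * block (the IV for the first) XORed with the current ciphertext.
	 *
	 *	for (i = 0; i < 16; i++) {
	 *		sm4_encrypt_ref(rk, keystream,
	 *				i ? src + (i - 1) * 16 : iv);
	 *		for (j = 0; j < 16; j++)
	 *			dst[i * 16 + j] = src[i * 16 + j] ^ keystream[j];
	 *	}
	 *	memcpy(iv, src + 15 * 16, 16);	// last ciphertext -> new IV
	 *
	 * Note that this uses the encryption round keys: CFB only ever
	 * runs the block cipher in the forward direction.
	 */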
	FRAME_BEGIN

	vzeroupper;

	/* Load input */
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	/* Update IV */
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	ret;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)