/*
 * Glue Code for AVX assembler versions of Serpent Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/fpu/api.h>
#include <asm/crypto/serpent-avx.h>
#include <asm/crypto/glue_helper.h>

/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);

asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);

asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src);
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);

asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
				     const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);

asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);

asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);

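/*
 * Single-block CTR helper: encrypt the current counter value (converted to
 * big-endian), XOR the resulting keystream block into the source block and
 * advance the little-endian counter in @iv for the next block.
 */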
void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	be128 ctrblk;

	le128_to_be128(&ctrblk, iv);
	le128_inc(iv);

	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}
EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);

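/*
 * Single-block XTS helpers, used by the glue code as the non-parallel
 * fallback; the tweak handling itself is done by glue_xts_crypt_128bit_one().
 */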
void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
				  GLUE_FUNC_CAST(__serpent_encrypt));
}
EXPORT_SYMBOL_GPL(serpent_xts_enc);

void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
				  GLUE_FUNC_CAST(__serpent_decrypt));
}
EXPORT_SYMBOL_GPL(serpent_xts_dec);


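/*
 * Dispatch tables for the common glue layer: whenever at least
 * SERPENT_PARALLEL_BLOCKS blocks are available the 8-way AVX routines are
 * used, otherwise the remaining blocks are handled one at a time by the
 * generic C implementation. .fpu_blocks_limit tells the glue code how many
 * blocks must be pending before enabling the FPU is worthwhile.
 */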
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};

static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
	} }
};

static const struct common_glue_ctx serpent_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
	} }
};

static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};

static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};

static const struct common_glue_ctx serpent_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
	} }
};

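/* blkcipher entry points: thin wrappers around the common glue helpers. */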
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}

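/*
 * Take the FPU lazily: it is only enabled once enough bytes are queued to
 * feed the 8-way path, and serpent_fpu_end() releases it again if it was
 * actually enabled.
 */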
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};

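/*
 * Callbacks used by lrw_crypt(): full groups of SERPENT_PARALLEL_BLOCKS are
 * handled by the 8-way AVX routines, any remaining blocks by the generic
 * single-block functions.
 */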
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

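/*
 * LRW key layout: the Serpent key proper is followed by one extra block
 * (SERPENT_BLOCK_SIZE bytes) of tweak key material used to initialize the
 * GF(2^128) multiplication table.
 */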
int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}
EXPORT_SYMBOL_GPL(lrw_serpent_setkey);

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

void lrw_serpent_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
EXPORT_SYMBOL_GPL(lrw_serpent_exit_tfm);

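/*
 * XTS uses two independent Serpent keys: after validation by
 * xts_check_key(), the first half keys the data cipher and the second half
 * keys the tweak cipher.
 */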
int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
EXPORT_SYMBOL_GPL(xts_serpent_setkey);

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__serpent_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__serpent_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

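/*
 * Ten algorithm instances: five synchronous, CRYPTO_ALG_INTERNAL-only
 * blkciphers ("__*-serpent-avx", priority 0) that may only be called with
 * the FPU usable, and five async ablkcipher front ends (priority 500)
 * exposed to users, wired up through the ablk_* helpers which defer to
 * cryptd when the FPU cannot be used directly.
 */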
static struct crypto_alg serpent_algs[10] = { {
	.cra_name		= "__ecb-serpent-avx",
	.cra_driver_name	= "__driver-ecb-serpent-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-serpent-avx",
	.cra_driver_name	= "__driver-cbc-serpent-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-serpent-avx",
	.cra_driver_name	= "__driver-ctr-serpent-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-serpent-avx",
	.cra_driver_name	= "__driver-lrw-serpent-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_serpent_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-serpent-avx",
	.cra_driver_name	= "__driver-xts-serpent-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-avx",
	.cra_priority		= 500,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-avx",
	.cra_priority		= 500,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-avx",
	.cra_priority		= 500,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-avx",
	.cra_priority		= 500,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-avx",
	.cra_priority		= 500,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

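/*
 * Only register the algorithms when the CPU and kernel support the SSE and
 * YMM (AVX) xstate features required by the assembler implementation.
 */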
static int __init serpent_init(void)
{
	const char *feature_name;

	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
				&feature_name)) {
		pr_info("CPU feature '%s' is not supported.\n", feature_name);
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_init);
module_exit(serpent_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");