xref: /openbmc/linux/arch/s390/crypto/aes_s390.c (revision 0177db01)
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;

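/*
 * Per-tfm context: the raw AES key and its length, the CPACF function
 * code selected at setkey time, and a software fallback tfm that is
 * used whenever the machine does not implement the requested key size.
 */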
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

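/*
 * Parameter block for the PCC (perform cryptographic computation)
 * instruction as used for XTS: the tweak key, the tweak value, and
 * scratch fields, of which 'xts' receives the computed initial tweak
 * that is then fed to KM (see xts_aes_crypt below).
 */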
struct pcc_param {
	u8 key[32];
	u8 tweak[16];
	u8 block[16];
	u8 bit[16];
	u8 xts[16];
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and software fallback
 * is required, or a negative number if the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
	}
	return 0;
}

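/*
 * Forward the key to the software fallback cipher.  The request flags
 * of this tfm are mirrored into the fallback before the call, and any
 * result flags the fallback raised are mirrored back on failure.
 */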
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

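/*
 * Single-block en/decryption via the KM (cipher message) instruction.
 * The function code selects the AES key size; ORing in CPACF_DECRYPT
 * turns the same function code into the decryption operation.
 */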
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		cpacf_km(CPACF_KM_AES_128,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	case 24:
		cpacf_km(CPACF_KM_AES_192,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	case 32:
		cpacf_km(CPACF_KM_AES_256,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		cpacf_km(CPACF_KM_AES_128 | CPACF_DECRYPT,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	case 24:
		cpacf_km(CPACF_KM_AES_192 | CPACF_DECRYPT,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	case 32:
		cpacf_km(CPACF_KM_AES_256 | CPACF_DECRYPT,
			 &sctx->key, out, in, AES_BLOCK_SIZE);
		break;
	}
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

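/*
 * Registered under the generic name "aes" with priority 300, so it is
 * preferred over the C implementation (aes-generic, priority 100).
 * A kernel user would reach it through the usual crypto API, e.g.
 * (illustrative sketch only, error handling omitted, not taken from
 * this file):
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_encrypt_one(tfm, dst, src);
 *	crypto_free_cipher(tfm);
 */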
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

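/*
 * Run one blkcipher request through the skcipher fallback.  The
 * request lives on the stack (SKCIPHER_REQUEST_ON_STACK) and is zeroed
 * after use so no key-derived state is left behind.
 */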
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	/* wipe the on-stack request, as in the decrypt path above */
	skcipher_request_zero(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->fc = CPACF_KM_AES_128;
		break;
	case 24:
		sctx->fc = CPACF_KM_AES_192;
		break;
	case 32:
		sctx->fc = CPACF_KM_AES_256;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

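/*
 * ECB walk: each chunk of complete blocks is handed to KM in one go;
 * the instruction processes any multiple of the block size per
 * invocation, so no per-block loop is needed here.
 */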
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		cpacf_km(func, param, out, in, n);

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->fc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->fc | CPACF_DECRYPT, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-s390",
	.cra_priority		=	400,	/* combo: aes + ecb */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	ecb_aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->fc = CPACF_KMC_AES_128;
		break;
	case 24:
		sctx->fc = CPACF_KMC_AES_192;
		break;
	case 32:
		sctx->fc = CPACF_KMC_AES_256;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

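/*
 * CBC uses the KMC (cipher message with chaining) instruction.  Its
 * parameter block carries the IV in front of the key; the chaining
 * value is updated in place, so the IV is copied back to the walk
 * once all data has been processed.
 */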
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (!nbytes)
		goto out;

	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		cpacf_kmc(func, &param, out, in, n);

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->fc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->fc | CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-s390",
	.cra_priority		=	400,	/* combo: aes + cbc */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	cbc_aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

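/*
 * XTS keys are double length: one half for the data-encryption key
 * (KM), one half for the tweak key (PCC).  CPACF implements only
 * XTS-AES-128 and XTS-AES-256, so a 48-byte key (XTS-AES-192) is
 * routed to the software fallback.
 */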
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	switch (key_len) {
	case 32:
		xts_ctx->fc = CPACF_KM_XTS_128;
		memcpy(xts_ctx->key + 16, in_key, 16);
		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
		break;
	case 48:
		xts_ctx->fc = 0;
		err = xts_fallback_setkey(tfm, in_key, key_len);
		if (err)
			return err;
		break;
	case 64:
		xts_ctx->fc = CPACF_KM_XTS_256;
		memcpy(xts_ctx->key, in_key, 32);
		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	xts_ctx->key_len = key_len;
	return 0;
}

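/*
 * Compute the initial tweak with PCC, then walk the data through KM.
 * 'offset' skips the unused first half of the 32-byte key fields for
 * the 128-bit variant, where only bytes 16..31 hold the key (see
 * xts_aes_set_key above).
 */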
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_xts_ctx *xts_ctx,
			 struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	struct pcc_param pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (!nbytes)
		goto out;

	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
	/* remove decipher modifier bit from 'func' and call PCC */
	cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);

	memcpy(xts_param.key, xts_ctx->key, 32);
	memcpy(xts_param.init, pcc_param.xts, 16);
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		cpacf_km(func, &xts_param.key[offset], out, in, n);

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->fc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->fc | CPACF_DECRYPT, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		=	"xts(aes)",
	.cra_driver_name	=	"xts-aes-s390",
	.cra_priority		=	400,	/* combo: aes + xts */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_xts_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	xts_fallback_init,
	.cra_exit		=	xts_fallback_exit,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	2 * AES_MIN_KEY_SIZE,
			.max_keysize		=	2 * AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	xts_aes_set_key,
			.encrypt		=	xts_aes_encrypt,
			.decrypt		=	xts_aes_decrypt,
		}
	}
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	switch (key_len) {
	case 16:
		sctx->fc = CPACF_KMCTR_AES_128;
		break;
	case 24:
		sctx->fc = CPACF_KMCTR_AES_192;
		break;
	case 32:
		sctx->fc = CPACF_KMCTR_AES_256;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

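/*
 * Fill a buffer with consecutive counter blocks, starting from the
 * value already in ctrptr[0], so that KMCTR can process up to a page
 * of data in a single invocation.
 */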
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
	}
	return n;
}

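/*
 * CTR mode via KMCTR.  The shared, page-sized counter buffer (ctrblk)
 * is taken opportunistically with spin_trylock(); if another CPU holds
 * it, processing falls back to one block at a time with an on-stack
 * counter rather than waiting for the lock.
 */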
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int n, nbytes;
	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
	u8 *out, *in, *ctrptr = ctrbuf;

	if (!walk->nbytes)
		return ret;

	if (spin_trylock(&ctrblk_lock))
		ctrptr = ctrblk;

	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			if (ctrptr == ctrblk)
				n = __ctrblk_init(ctrptr, nbytes);
			else
				n = AES_BLOCK_SIZE;
			cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrptr, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	if (ctrptr == ctrblk) {
		if (nbytes)
			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
		else
			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
		spin_unlock(&ctrblk_lock);
	} else {
		if (!nbytes)
			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		cpacf_kmctr(func, sctx->key, buf, in, AES_BLOCK_SIZE, ctrbuf);
		memcpy(out, buf, nbytes);
		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->fc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->fc | CPACF_DECRYPT, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		=	"ctr(aes)",
	.cra_driver_name	=	"ctr-aes-s390",
	.cra_priority		=	400,	/* combo: aes + ctr */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	1,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	ctr_aes_set_key,
			.encrypt		=	ctr_aes_encrypt,
			.decrypt		=	ctr_aes_decrypt,
		}
	}
};

static int ctr_aes_alg_reg;

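/*
 * Probe the CPACF facilities and register only what the machine
 * supports: the plain cipher and ECB/CBC always (with software
 * fallback for missing key sizes), XTS and CTR only if the needed
 * KM/KMCTR function codes are present.
 */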
static int __init aes_s390_init(void)
{
	int ret;

	if (cpacf_query(CPACF_KM, CPACF_KM_AES_128))
		keylen_flag |= AES_KEYLEN_128;
	if (cpacf_query(CPACF_KM, CPACF_KM_AES_192))
		keylen_flag |= AES_KEYLEN_192;
	if (cpacf_query(CPACF_KM, CPACF_KM_AES_256))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128) &&
	    cpacf_query(CPACF_KM, CPACF_KM_XTS_256)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
		xts_aes_alg_reg = 1;
	}

	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128) &&
	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192) &&
	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_aes_err;
		}
		ret = crypto_register_alg(&ctr_aes_alg);
		if (ret) {
			free_page((unsigned long) ctrblk);
			goto ctr_aes_err;
		}
		ctr_aes_alg_reg = 1;
	}

out:
	return ret;

ctr_aes_err:
	/* XTS may not have been registered on this machine */
	if (xts_aes_alg_reg)
		crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
	crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	if (ctr_aes_alg_reg) {
		crypto_unregister_alg(&ctr_aes_alg);
		free_page((unsigned long) ctrblk);
	}
	if (xts_aes_alg_reg)
		crypto_unregister_alg(&xts_aes_alg);
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");