xref: /openbmc/linux/arch/s390/crypto/aes_s390.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"

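/*
 * Bit flags for the AES key lengths the CPACF hardware supports;
 * aes_s390_init() probes each function code and ORs the matching
 * flag into keylen_flag.
 */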
#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static char keylen_flag;

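/*
 * Per-tfm context: the CBC chaining value (iv), the raw key and its
 * length, the KM/KMC function codes chosen at setkey time (enc/dec),
 * and the software fallback tfm used when the hardware does not
 * support the selected key length.
 */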
struct s390_aes_ctx {
	u8 iv[AES_BLOCK_SIZE];
	u8 key[AES_MAX_KEY_SIZE];
	long enc;
	long dec;
	int key_len;
	union {
		struct crypto_blkcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and a software
 * fallback is required, or a negative number if the key size is not
 * valid.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
	}
	return 0;
}

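/*
 * Forward the key to the software fallback cipher: the request flags
 * of our tfm are mirrored into the fallback tfm before the setkey
 * call, and any result flags are copied back afterwards so the
 * caller sees them on the original tfm.
 */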
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

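/*
 * Single-block operations: dispatch to the KM (cipher message)
 * instruction with the function code matching the key length, or to
 * the software fallback when need_fallback() says the hardware
 * cannot handle this key size.
 */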
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

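/*
 * Allocate/free the software fallback at tfm init/exit time.
 * Passing CRYPTO_ALG_NEED_FALLBACK in the mask (with the bit clear
 * in the type) excludes algorithms that themselves need a fallback,
 * so this driver cannot end up selecting itself.
 */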
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	CRYPT_S390_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

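/*
 * Blkcipher counterpart of setkey_fallback_cip(): mirror the request
 * flags into the fallback blkcipher, set the key there, and copy any
 * result flags back to the original tfm.
 */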
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

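/*
 * Run a request through the software fallback by temporarily
 * swapping desc->tfm to the fallback blkcipher for the duration of
 * the decrypt/encrypt call, then restoring the original tfm.
 */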
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

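/*
 * ECB setkey: select the KM function codes for this key length, or
 * defer to the software fallback when the hardware lacks support.
 * The key itself is stored by aes_set_key().
 */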
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

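/*
 * Walk the scatterlists and feed each segment to the KM instruction
 * in AES_BLOCK_SIZE multiples; any remainder below one block is
 * handed back to blkcipher_walk_done() for the next iteration.
 */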
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		BUG_ON((ret < 0) || (ret != n));

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(sctx->fallback.blk);
	sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	ecb_aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

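/*
 * CBC walk using KMC (cipher message with chaining).  param points
 * at sctx->iv, which is immediately followed by the key in the
 * context structure, matching the KMC parameter block layout
 * (chaining value followed by the key).  The IV is copied into the
 * parameter block before the first call and the updated chaining
 * value is copied back to walk->iv when the walk completes.
 */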
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;

	if (!nbytes)
		goto out;

	memcpy(param, walk->iv, AES_BLOCK_SIZE);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, param, out, in, n);
		BUG_ON((ret < 0) || (ret != n));

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	cbc_aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

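/*
 * Module init: probe which AES function codes the CPACF offers, bail
 * out if none, note when only 128-bit keys are accelerated, and
 * register the cipher and blkcipher algorithms, unwinding the
 * earlier registrations on failure.
 */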
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");