// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

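/*
 * Load the AES key into both coprocessor control blocks: the GCM block
 * used for the payload and the GCA block used to hash the associated
 * data.
 */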
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

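/*
 * An RFC 4106 key is the AES key with a 4-byte nonce (salt) appended.
 * Split the nonce off, key the engine with the remainder and save the
 * nonce for IV construction at request time.
 */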
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

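/* RFC 4106 permits only 8-, 12- and 16-byte authentication tags. */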
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

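/*
 * Hash the associated data with the GCA coprocessor function, walking
 * req->src in chunks that fit the NX scatter/gather and data-length
 * limits.  The accumulated partial tag is returned in *out.
 */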
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

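	/*
	 * Associated data that fits in a single AES block is copied out
	 * as-is; the caller feeds it to the GCM control block directly.
	 */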
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

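/*
 * Authenticate associated data only (no payload) by temporarily
 * switching the control block into GMAC mode.
 */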
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

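/*
 * Zero-length payload with zero-length associated data: the tag reduces
 * to the encryption of the initial counter block, which the code below
 * computes with a single ECB operation on the IV (see the workbook note
 * inside the function).
 */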
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Stash the auth tag; the caller copies it out to the request */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter, so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

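/*
 * Core GCM handler: hash the associated data, push the payload through
 * the coprocessor in bounded chunks, then emit (encrypt) or verify
 * (decrypt) the authentication tag.
 */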
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

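	/*
	 * Zero-length payload: use the empty-message shortcut or GMAC over
	 * the associated data, then go straight to tag handling.
	 */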
	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
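		/*
		 * Decrypting: the tag is appended to the ciphertext, so
		 * drop it from the data length.
		 */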
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
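	/*
	 * Emit the computed tag on encrypt; on decrypt, compare it with
	 * the tag appended to the ciphertext.
	 */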
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = crypto_memneq(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

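/*
 * RFC 4106: the full 12-byte GCM IV is the 4-byte nonce saved at setkey
 * time followed by the 8-byte per-request IV.  req->assoclen covers
 * those trailing 8 IV bytes as well, hence the assoclen - 8 below.
 */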
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_RFC4106_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};