/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

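/* The NX CCM coprocessor path only accepts 128-bit AES keys. The key is
 * programmed into both coprocessor blocks: the CCM block used for
 * encryption/decryption and the CCA block used when the associated data
 * is hashed in a separate pass. */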
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

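/* For rfc4309(ccm(aes)), the key material supplied by the caller is the
 * AES key followed by a 3-byte nonce (salt), per RFC 4309. Strip off the
 * nonce, save it for IV construction, then program the AES key as usual. */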
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

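/* CCM (RFC 3610) allows any even authentication tag length between 4 and
 * 16 bytes. */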
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

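/* RFC 4309 restricts the ICV length to 8, 12 or 16 bytes. */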
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

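/* Encode @msglen as a @csize-byte big-endian integer at @block, padding
 * with leading zeroes and returning -EOVERFLOW if the length does not
 * fit. This produces the l(m) field at the end of the CCM B0 block
 * (RFC 3610). */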
/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

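/* Build the CCM B0 block from the caller's IV. Per RFC 3610, B0 is the
 * flags byte, followed by the nonce, followed by the message length
 * encoded in L bytes. The IV already carries L' = L - 1 in byte 0 and the
 * nonce in the following bytes, so only the tag-length bits (3-5) and the
 * Adata bit (6) of the flags byte need to be ORed in before the message
 * length is written over the last L bytes. */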
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

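/* Generate the partial authentication tag (PAT) that seeds the NX CCM
 * operation. Three cases are handled, based on the amount of associated
 * data: none (B0 alone, placed directly in the csbcpb), up to 14 bytes
 * (B0 plus a B1 block holding all of the associated data, run through a
 * single CCM pass), and up to 2^16 - 2^8 bytes (B0/B1 plus the remaining
 * associated data, hashed by a separate CCA operation). Anything larger
 * is rejected. */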
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	struct vio_pfo_op *op = NULL;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;

		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
					    nx_ctx->ap->sglen);

		/* inlen should be negative, indicating to phyp that it is a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		op = &nx_ctx->op;
		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
	} else if (req->assoclen <= 65280) {
		/* if associated data is at most (2^16 - 2^8) bytes, we
		 * construct B1 differently and feed in the associated data to
		 * a CCA operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;

		/* remaining assoc data must have scatterlist built for it */
		nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
					    req->assoc, iauth_len,
					    req->assoclen - iauth_len);
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

		op = &nx_ctx->op_aead;
		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	} else {
		/* more than (2^16 - 2^8) bytes of associated data is not
		 * supported; bail out before b0/b1 are dereferenced */
		pr_err("associated data len is %u bytes (returning -EINVAL)\n",
		       req->assoclen);
		return -EINVAL;
	}

	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		goto done;

	if (b1) {
		memset(b1, 0, 16);
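		/* CCM stores l(a) most-significant-byte first when
		 * 0 < l(a) < 2^16 - 2^8; the plain u16 store below assumes a
		 * big-endian CPU, as on the Power systems that carry the NX
		 * unit. */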
		*(u16 *)b1 = (u16)req->assoclen;

		scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);

		rc = nx_hcall_sync(nx_ctx, op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto done;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		memcpy(out, result, AES_BLOCK_SIZE);
	}
done:
	return rc;
}

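/* CCM decryption: carve the authentication tag off the end of the source
 * scatterlist and save it, generate the PAT from B0/B1, have the NX unit
 * decrypt the payload, then compare the MAC it computed against the saved
 * tag, returning -EBADMSG on a mismatch. */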
static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_ccm.iv_or_ctr);
	if (rc)
		goto out;

	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	return rc;
}

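/* CCM encryption: generate the PAT from B0/B1, have the NX unit encrypt
 * the payload and produce the MAC in one pass, then append the MAC to the
 * destination scatterlist as the authentication tag. */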
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_ccm.iv_or_ctr);
	if (rc)
		goto out;

	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);
out:
	return rc;
}

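/* RFC 4309 nonce construction: byte 0 of the 16-byte CCM IV holds the
 * flags value L' = 3 (a 4-byte length field), bytes 1-3 hold the salt
 * saved at setkey time and bytes 4-11 hold the 8-byte per-request IV. The
 * remaining counter bytes are zeroed in generate_pat(). */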
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

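/* Plain ccm(aes) takes the full 16-byte IV from the request; only the
 * flags byte (L') is validated before the request is handed off. */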
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_list        = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list),
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	}
};

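/* The rfc4309 variant advertises an 8-byte IV and relies on the "seqiv"
 * generator to supply it, hence the nivaead cra_type and geniv below. */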
struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_list        = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list),
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	}
};
469