/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
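
/* Example of the encoding above: with csize = 4 and msglen = 0x00010203,
 * the last four bytes of the size field become 00 01 02 03, i.e. the
 * big-endian l(m) field that RFC 3610 places at the tail of B0. For
 * csize > 4 only the low four bytes are ever written, since msglen is a
 * 32-bit quantity here; the leading bytes stay zero from the memset.
 */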

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}
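
/* The first IV byte holds L' = L - 1, where L is the width in bytes of
 * the CCM length field. For instance, iv[0] == 3 (the value used by the
 * RFC 4309 variant below) selects a 4-byte length field and leaves
 * 15 - 4 = 11 bytes of nonce.
 */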

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}
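
/* Worked example of the B0 flags byte built above: for an 8-byte MAC
 * (m = 8), associated data present, and iv[0] = 3 (L' = 3), the byte is
 * 0x40 | 8 * ((8 - 2) / 2) | 0x03 = 0x40 | 0x18 | 0x03 = 0x5b, matching
 * the Adata/M'/L' flag layout of RFC 3610.
 */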

static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1 bytes.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is at most (2^16 - 2^8) bytes, we
		 * construct B1 differently and feed in the associated data
		 * to a CCA operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}
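
	/* The iauth_len values above follow from the RFC 3610 AAD length
	 * encoding: a 2-byte length prefix leaves 16 - 2 = 14 bytes of AAD
	 * in the B1 block, while the 0xfffe marker plus a 4-byte length
	 * leaves 16 - 6 = 10 bytes.
	 */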

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
					    nx_ctx->ap->sglen);

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
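		/* e.g. after nx_build_sg_list() consumes a single
		 * struct nx_sg entry above, nx_insg points one element
		 * past nx_ctx->in_sg, so the subtraction below yields
		 * -sizeof(struct nx_sg)
		 */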
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	} else {
		u32 max_sg_len;
		unsigned int processed = 0, to_process;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u32,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				   nx_ctx->ap->sglen);

		processed += iauth_len;

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);
			to_process = min_t(u64, to_process,
					   NX_PAGE_SIZE * (max_sg_len - 1));

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    to_process);

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
				nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
				AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
					&(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
					to_process, processed,
					csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

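	/* compare the accumulated MAC with the tag copied from req->src
	 * above; any mismatch fails authentication. Note that memcmp()
	 * is not constant-time; the crypto layer generally prefers
	 * crypto_memneq() for tag comparisons to avoid timing leaks.
	 */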
	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

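	/* RFC 4309 nonce layout: iv[0] = 3 selects L' = 3 (a 4-byte
	 * length field); the 3-byte salt taken from the end of the key
	 * material and the 8-byte per-request IV then form the 11-byte
	 * nonce
	 */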
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses the tfm's blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	}
};
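
/* Minimal usage sketch (illustrative only, not part of this driver):
 * once registered, these algorithms are reached through the generic
 * kernel AEAD API, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	... build an aead_request and call crypto_aead_encrypt() ...
 *	crypto_free_aead(tfm);
 *
 * The cra_priority of 300 above lets this hardware implementation win
 * over the generic software ccm(aes) when the NX coprocessor is present.
 */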