/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
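/*
 * Illustrative arithmetic (the exact DESC_QI_* figures live in
 * caamalg_desc.h): CAAM descriptor commands are CAAM_CMD_SZ (4) bytes each,
 * and CAAM_MAX_KEY_SIZE works out to 32 (AES-256 key) + 2 * 64 (split
 * HMAC-SHA512 key) = 160 bytes, so DESC_MAX_USED_LEN is just the worst-case
 * byte total expressed in 32-bit descriptor words.
 */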

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load the IV in the CONTEXT1 register
	 * at an offset of 128 bits (16 bytes):
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
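	/*
	 * Key material layout in ctx->key for RFC3686 authenc, roughly:
	 *
	 *   | split auth key, padded to adata.keylen_pad | enc key | nonce |
	 *                                                 \__ cdata.keylen __/
	 *
	 * i.e. the user-supplied encryption key carries the 4-byte nonce at
	 * its tail, so the pointer computed above addresses the last
	 * CTR_RFC3686_NONCE_SIZE bytes of the cipher key region.
	 */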

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
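	/*
	 * desc_inline_query() reports, per key, whether it still fits inside
	 * the shared descriptor: bit 0 of inl_mask covers data_len[0] (the
	 * split auth key), bit 1 covers data_len[1] (the cipher key). For
	 * example, inl_mask == 3 means both keys travel inline as immediate
	 * data, while inl_mask == 1 means only the auth key is inline and
	 * the cipher key is referenced through its DMA address instead.
	 */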

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

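	/*
	 * Pre-era-6 parts lack DKP, so the split key (an HMAC IPAD/OPAD
	 * pair, roughly 2 * digest size once padded) is computed up front
	 * via a one-shot job ring request; the resulting ctx->key layout,
	 *
	 *   | split auth key (keylen_pad) | encryption key (enckeylen) |
	 *
	 * matches what the shared-descriptor constructors expect.
	 */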
	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;
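	/*
	 * Illustrative arithmetic: one CAAM descriptor buffer holds 64
	 * words (CAAM_DESC_BYTES_MAX, 256 bytes). After reserving room for
	 * the job descriptor I/O section and the GCM key, whatever remains
	 * decides below whether the key can travel inline in the shared
	 * descriptor or must be referenced through key_dma.
	 */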

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load the IV in the CONTEXT1 register
	 * at an offset of 128 bits (16 bytes):
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
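	/*
	 * E.g. for rfc3686(ctr(aes)) with a 16-byte AES key the caller
	 * passes 20 bytes: the trailing CTR_RFC3686_NONCE_SIZE (4) bytes
	 * are the nonce, so keylen is trimmed back to the raw AES key
	 * length before the descriptors are constructed.
	 */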

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}
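	/*
	 * XTS consumes two AES keys of equal size concatenated together,
	 * so the lengths accepted here are 32 bytes (2 x AES-128) or
	 * 64 bytes (2 x AES-256); this revision rejects 2 x AES-192.
	 */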

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};
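
/*
 * Memory picture of one qi_cache allocation backing either extended
 * descriptor (the aead variant above, the ablkcipher variant below),
 * for illustration:
 *
 *   [ struct *_edesc | sgt[0 .. qm_sg_ents - 1] | IV (if any) ]
 *
 * The h/w link table and the IV share the edesc allocation, which is why
 * the allocation paths compare offsetof(sgt) + qm_sg_bytes + ivsize
 * against CAAM_QI_MEMCACHE_SIZE before building the table.
 */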

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * Verify that the h/w auth (ICV) check passed;
		 * otherwise return -EBADMSG.
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
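	/*
	 * Resulting S/G table layout, for illustration:
	 *
	 *   sgt[0]        -> assoclen (4 bytes, CAAM endianness)
	 *   sgt[1]        -> IV, only when ivsize != 0
	 *   sgt[...]      -> req->src segments
	 *   sgt[tail...]  -> req->dst segments, only when dst != src and
	 *                    dst has more than one mapped segment
	 */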
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
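	/*
	 * Worked example, assuming AES-GCM encryption of 64 bytes with
	 * 16 bytes of AAD and a 16-byte tag: in_len = 4 (assoclen word) +
	 * 12 (IV) + 16 + 64 = 96 bytes consumed, and out_len = 16 + 64 +
	 * 16 = 96 bytes produced; on decryption authsize is subtracted
	 * from out_len instead.
	 */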

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);

	/* In case an initial IV was generated, copy it into the GIVCIPHER request */
	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
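	/*
	 * Input table layout here is [IV][src segments], with optional
	 * [dst segments] appended at dst_sg_idx for the out-of-place case;
	 * a single-segment destination is addressed directly in the frame
	 * descriptor instead of going through the table.
	 */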
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->nbytes, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += 1 + mapped_dst_nents;
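	/*
	 * For givencrypt the generated IV is an *output*, so the table is
	 * laid out as [src segments (only if > 1)][IV][dst segments] and
	 * the IV is DMA-mapped FROM_DEVICE further down, unlike the
	 * TO_DEVICE mapping used on the plain encrypt/decrypt paths.
	 */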
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
			 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
			     sizeof(*sg_table), ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);
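	/*
	 * The copy is taken from req->src *before* enqueueing because an
	 * in-place (src == dst) request would have overwritten the last
	 * ciphertext block with plaintext by the time the job completes.
	 */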

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1901 				.cra_driver_name = "authenc-hmac-sha384-"
1902 						   "cbc-aes-caam-qi",
1903 				.cra_blocksize = AES_BLOCK_SIZE,
1904 			},
1905 			.setkey = aead_setkey,
1906 			.setauthsize = aead_setauthsize,
1907 			.encrypt = aead_encrypt,
1908 			.decrypt = aead_decrypt,
1909 			.ivsize = AES_BLOCK_SIZE,
1910 			.maxauthsize = SHA384_DIGEST_SIZE,
1911 		},
1912 		.caam = {
1913 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1914 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1915 					   OP_ALG_AAI_HMAC_PRECOMP,
1916 		}
1917 	},
1918 	{
1919 		.aead = {
1920 			.base = {
1921 				.cra_name = "echainiv(authenc(hmac(sha384),"
1922 					    "cbc(aes)))",
1923 				.cra_driver_name = "echainiv-authenc-"
1924 						   "hmac-sha384-cbc-aes-"
1925 						   "caam-qi",
1926 				.cra_blocksize = AES_BLOCK_SIZE,
1927 			},
1928 			.setkey = aead_setkey,
1929 			.setauthsize = aead_setauthsize,
1930 			.encrypt = aead_encrypt,
1931 			.decrypt = aead_decrypt,
1932 			.ivsize = AES_BLOCK_SIZE,
1933 			.maxauthsize = SHA384_DIGEST_SIZE,
1934 		},
1935 		.caam = {
1936 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1937 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1938 					   OP_ALG_AAI_HMAC_PRECOMP,
1939 			.geniv = true,
1940 		}
1941 	},
1942 	{
1943 		.aead = {
1944 			.base = {
1945 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1946 				.cra_driver_name = "authenc-hmac-sha512-"
1947 						   "cbc-aes-caam-qi",
1948 				.cra_blocksize = AES_BLOCK_SIZE,
1949 			},
1950 			.setkey = aead_setkey,
1951 			.setauthsize = aead_setauthsize,
1952 			.encrypt = aead_encrypt,
1953 			.decrypt = aead_decrypt,
1954 			.ivsize = AES_BLOCK_SIZE,
1955 			.maxauthsize = SHA512_DIGEST_SIZE,
1956 		},
1957 		.caam = {
1958 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1959 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1960 					   OP_ALG_AAI_HMAC_PRECOMP,
1961 		}
1962 	},
1963 	{
1964 		.aead = {
1965 			.base = {
1966 				.cra_name = "echainiv(authenc(hmac(sha512),"
1967 					    "cbc(aes)))",
1968 				.cra_driver_name = "echainiv-authenc-"
1969 						   "hmac-sha512-cbc-aes-"
1970 						   "caam-qi",
1971 				.cra_blocksize = AES_BLOCK_SIZE,
1972 			},
1973 			.setkey = aead_setkey,
1974 			.setauthsize = aead_setauthsize,
1975 			.encrypt = aead_encrypt,
1976 			.decrypt = aead_decrypt,
1977 			.ivsize = AES_BLOCK_SIZE,
1978 			.maxauthsize = SHA512_DIGEST_SIZE,
1979 		},
1980 		.caam = {
1981 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1982 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1983 					   OP_ALG_AAI_HMAC_PRECOMP,
1984 			.geniv = true,
1985 		}
1986 	},
1987 	{
1988 		.aead = {
1989 			.base = {
1990 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1991 				.cra_driver_name = "authenc-hmac-md5-"
1992 						   "cbc-des3_ede-caam-qi",
1993 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1994 			},
1995 			.setkey = aead_setkey,
1996 			.setauthsize = aead_setauthsize,
1997 			.encrypt = aead_encrypt,
1998 			.decrypt = aead_decrypt,
1999 			.ivsize = DES3_EDE_BLOCK_SIZE,
2000 			.maxauthsize = MD5_DIGEST_SIZE,
2001 		},
2002 		.caam = {
2003 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2004 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2005 					   OP_ALG_AAI_HMAC_PRECOMP,
2006 		}
2007 	},
2008 	{
2009 		.aead = {
2010 			.base = {
2011 				.cra_name = "echainiv(authenc(hmac(md5),"
2012 					    "cbc(des3_ede)))",
2013 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2014 						   "cbc-des3_ede-caam-qi",
2015 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2016 			},
2017 			.setkey = aead_setkey,
2018 			.setauthsize = aead_setauthsize,
2019 			.encrypt = aead_encrypt,
2020 			.decrypt = aead_decrypt,
2021 			.ivsize = DES3_EDE_BLOCK_SIZE,
2022 			.maxauthsize = MD5_DIGEST_SIZE,
2023 		},
2024 		.caam = {
2025 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2026 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2027 					   OP_ALG_AAI_HMAC_PRECOMP,
2028 			.geniv = true,
2029 		}
2030 	},
2031 	{
2032 		.aead = {
2033 			.base = {
2034 				.cra_name = "authenc(hmac(sha1),"
2035 					    "cbc(des3_ede))",
2036 				.cra_driver_name = "authenc-hmac-sha1-"
2037 						   "cbc-des3_ede-caam-qi",
2038 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2039 			},
2040 			.setkey = aead_setkey,
2041 			.setauthsize = aead_setauthsize,
2042 			.encrypt = aead_encrypt,
2043 			.decrypt = aead_decrypt,
2044 			.ivsize = DES3_EDE_BLOCK_SIZE,
2045 			.maxauthsize = SHA1_DIGEST_SIZE,
2046 		},
2047 		.caam = {
2048 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2049 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2050 					   OP_ALG_AAI_HMAC_PRECOMP,
2051 		},
2052 	},
2053 	{
2054 		.aead = {
2055 			.base = {
2056 				.cra_name = "echainiv(authenc(hmac(sha1),"
2057 					    "cbc(des3_ede)))",
2058 				.cra_driver_name = "echainiv-authenc-"
2059 						   "hmac-sha1-"
2060 						   "cbc-des3_ede-caam-qi",
2061 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2062 			},
2063 			.setkey = aead_setkey,
2064 			.setauthsize = aead_setauthsize,
2065 			.encrypt = aead_encrypt,
2066 			.decrypt = aead_decrypt,
2067 			.ivsize = DES3_EDE_BLOCK_SIZE,
2068 			.maxauthsize = SHA1_DIGEST_SIZE,
2069 		},
2070 		.caam = {
2071 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2072 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2073 					   OP_ALG_AAI_HMAC_PRECOMP,
2074 			.geniv = true,
2075 		}
2076 	},
2077 	{
2078 		.aead = {
2079 			.base = {
2080 				.cra_name = "authenc(hmac(sha224),"
2081 					    "cbc(des3_ede))",
2082 				.cra_driver_name = "authenc-hmac-sha224-"
2083 						   "cbc-des3_ede-caam-qi",
2084 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2085 			},
2086 			.setkey = aead_setkey,
2087 			.setauthsize = aead_setauthsize,
2088 			.encrypt = aead_encrypt,
2089 			.decrypt = aead_decrypt,
2090 			.ivsize = DES3_EDE_BLOCK_SIZE,
2091 			.maxauthsize = SHA224_DIGEST_SIZE,
2092 		},
2093 		.caam = {
2094 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2095 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2096 					   OP_ALG_AAI_HMAC_PRECOMP,
2097 		},
2098 	},
2099 	{
2100 		.aead = {
2101 			.base = {
2102 				.cra_name = "echainiv(authenc(hmac(sha224),"
2103 					    "cbc(des3_ede)))",
2104 				.cra_driver_name = "echainiv-authenc-"
2105 						   "hmac-sha224-"
2106 						   "cbc-des3_ede-caam-qi",
2107 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2108 			},
2109 			.setkey = aead_setkey,
2110 			.setauthsize = aead_setauthsize,
2111 			.encrypt = aead_encrypt,
2112 			.decrypt = aead_decrypt,
2113 			.ivsize = DES3_EDE_BLOCK_SIZE,
2114 			.maxauthsize = SHA224_DIGEST_SIZE,
2115 		},
2116 		.caam = {
2117 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2118 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2119 					   OP_ALG_AAI_HMAC_PRECOMP,
2120 			.geniv = true,
2121 		}
2122 	},
2123 	{
2124 		.aead = {
2125 			.base = {
2126 				.cra_name = "authenc(hmac(sha256),"
2127 					    "cbc(des3_ede))",
2128 				.cra_driver_name = "authenc-hmac-sha256-"
2129 						   "cbc-des3_ede-caam-qi",
2130 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2131 			},
2132 			.setkey = aead_setkey,
2133 			.setauthsize = aead_setauthsize,
2134 			.encrypt = aead_encrypt,
2135 			.decrypt = aead_decrypt,
2136 			.ivsize = DES3_EDE_BLOCK_SIZE,
2137 			.maxauthsize = SHA256_DIGEST_SIZE,
2138 		},
2139 		.caam = {
2140 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2141 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2142 					   OP_ALG_AAI_HMAC_PRECOMP,
2143 		},
2144 	},
2145 	{
2146 		.aead = {
2147 			.base = {
2148 				.cra_name = "echainiv(authenc(hmac(sha256),"
2149 					    "cbc(des3_ede)))",
2150 				.cra_driver_name = "echainiv-authenc-"
2151 						   "hmac-sha256-"
2152 						   "cbc-des3_ede-caam-qi",
2153 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2154 			},
2155 			.setkey = aead_setkey,
2156 			.setauthsize = aead_setauthsize,
2157 			.encrypt = aead_encrypt,
2158 			.decrypt = aead_decrypt,
2159 			.ivsize = DES3_EDE_BLOCK_SIZE,
2160 			.maxauthsize = SHA256_DIGEST_SIZE,
2161 		},
2162 		.caam = {
2163 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2164 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2165 					   OP_ALG_AAI_HMAC_PRECOMP,
2166 			.geniv = true,
2167 		}
2168 	},
2169 	{
2170 		.aead = {
2171 			.base = {
2172 				.cra_name = "authenc(hmac(sha384),"
2173 					    "cbc(des3_ede))",
2174 				.cra_driver_name = "authenc-hmac-sha384-"
2175 						   "cbc-des3_ede-caam-qi",
2176 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2177 			},
2178 			.setkey = aead_setkey,
2179 			.setauthsize = aead_setauthsize,
2180 			.encrypt = aead_encrypt,
2181 			.decrypt = aead_decrypt,
2182 			.ivsize = DES3_EDE_BLOCK_SIZE,
2183 			.maxauthsize = SHA384_DIGEST_SIZE,
2184 		},
2185 		.caam = {
2186 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2187 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2188 					   OP_ALG_AAI_HMAC_PRECOMP,
2189 		},
2190 	},
2191 	{
2192 		.aead = {
2193 			.base = {
2194 				.cra_name = "echainiv(authenc(hmac(sha384),"
2195 					    "cbc(des3_ede)))",
2196 				.cra_driver_name = "echainiv-authenc-"
2197 						   "hmac-sha384-"
2198 						   "cbc-des3_ede-caam-qi",
2199 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2200 			},
2201 			.setkey = aead_setkey,
2202 			.setauthsize = aead_setauthsize,
2203 			.encrypt = aead_encrypt,
2204 			.decrypt = aead_decrypt,
2205 			.ivsize = DES3_EDE_BLOCK_SIZE,
2206 			.maxauthsize = SHA384_DIGEST_SIZE,
2207 		},
2208 		.caam = {
2209 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2210 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2211 					   OP_ALG_AAI_HMAC_PRECOMP,
2212 			.geniv = true,
2213 		}
2214 	},
2215 	{
2216 		.aead = {
2217 			.base = {
2218 				.cra_name = "authenc(hmac(sha512),"
2219 					    "cbc(des3_ede))",
2220 				.cra_driver_name = "authenc-hmac-sha512-"
2221 						   "cbc-des3_ede-caam-qi",
2222 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2223 			},
2224 			.setkey = aead_setkey,
2225 			.setauthsize = aead_setauthsize,
2226 			.encrypt = aead_encrypt,
2227 			.decrypt = aead_decrypt,
2228 			.ivsize = DES3_EDE_BLOCK_SIZE,
2229 			.maxauthsize = SHA512_DIGEST_SIZE,
2230 		},
2231 		.caam = {
2232 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2233 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2234 					   OP_ALG_AAI_HMAC_PRECOMP,
2235 		},
2236 	},
2237 	{
2238 		.aead = {
2239 			.base = {
2240 				.cra_name = "echainiv(authenc(hmac(sha512),"
2241 					    "cbc(des3_ede)))",
2242 				.cra_driver_name = "echainiv-authenc-"
2243 						   "hmac-sha512-"
2244 						   "cbc-des3_ede-caam-qi",
2245 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2246 			},
2247 			.setkey = aead_setkey,
2248 			.setauthsize = aead_setauthsize,
2249 			.encrypt = aead_encrypt,
2250 			.decrypt = aead_decrypt,
2251 			.ivsize = DES3_EDE_BLOCK_SIZE,
2252 			.maxauthsize = SHA512_DIGEST_SIZE,
2253 		},
2254 		.caam = {
2255 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2256 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2257 					   OP_ALG_AAI_HMAC_PRECOMP,
2258 			.geniv = true,
2259 		}
2260 	},
2261 	{
2262 		.aead = {
2263 			.base = {
2264 				.cra_name = "authenc(hmac(md5),cbc(des))",
2265 				.cra_driver_name = "authenc-hmac-md5-"
2266 						   "cbc-des-caam-qi",
2267 				.cra_blocksize = DES_BLOCK_SIZE,
2268 			},
2269 			.setkey = aead_setkey,
2270 			.setauthsize = aead_setauthsize,
2271 			.encrypt = aead_encrypt,
2272 			.decrypt = aead_decrypt,
2273 			.ivsize = DES_BLOCK_SIZE,
2274 			.maxauthsize = MD5_DIGEST_SIZE,
2275 		},
2276 		.caam = {
2277 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2278 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2279 					   OP_ALG_AAI_HMAC_PRECOMP,
2280 		},
2281 	},
2282 	{
2283 		.aead = {
2284 			.base = {
2285 				.cra_name = "echainiv(authenc(hmac(md5),"
2286 					    "cbc(des)))",
2287 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2288 						   "cbc-des-caam-qi",
2289 				.cra_blocksize = DES_BLOCK_SIZE,
2290 			},
2291 			.setkey = aead_setkey,
2292 			.setauthsize = aead_setauthsize,
2293 			.encrypt = aead_encrypt,
2294 			.decrypt = aead_decrypt,
2295 			.ivsize = DES_BLOCK_SIZE,
2296 			.maxauthsize = MD5_DIGEST_SIZE,
2297 		},
2298 		.caam = {
2299 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2300 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2301 					   OP_ALG_AAI_HMAC_PRECOMP,
2302 			.geniv = true,
2303 		}
2304 	},
2305 	{
2306 		.aead = {
2307 			.base = {
2308 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2309 				.cra_driver_name = "authenc-hmac-sha1-"
2310 						   "cbc-des-caam-qi",
2311 				.cra_blocksize = DES_BLOCK_SIZE,
2312 			},
2313 			.setkey = aead_setkey,
2314 			.setauthsize = aead_setauthsize,
2315 			.encrypt = aead_encrypt,
2316 			.decrypt = aead_decrypt,
2317 			.ivsize = DES_BLOCK_SIZE,
2318 			.maxauthsize = SHA1_DIGEST_SIZE,
2319 		},
2320 		.caam = {
2321 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2322 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2323 					   OP_ALG_AAI_HMAC_PRECOMP,
2324 		},
2325 	},
2326 	{
2327 		.aead = {
2328 			.base = {
2329 				.cra_name = "echainiv(authenc(hmac(sha1),"
2330 					    "cbc(des)))",
2331 				.cra_driver_name = "echainiv-authenc-"
2332 						   "hmac-sha1-cbc-des-caam-qi",
2333 				.cra_blocksize = DES_BLOCK_SIZE,
2334 			},
2335 			.setkey = aead_setkey,
2336 			.setauthsize = aead_setauthsize,
2337 			.encrypt = aead_encrypt,
2338 			.decrypt = aead_decrypt,
2339 			.ivsize = DES_BLOCK_SIZE,
2340 			.maxauthsize = SHA1_DIGEST_SIZE,
2341 		},
2342 		.caam = {
2343 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2344 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2345 					   OP_ALG_AAI_HMAC_PRECOMP,
2346 			.geniv = true,
2347 		}
2348 	},
2349 	{
2350 		.aead = {
2351 			.base = {
2352 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2353 				.cra_driver_name = "authenc-hmac-sha224-"
2354 						   "cbc-des-caam-qi",
2355 				.cra_blocksize = DES_BLOCK_SIZE,
2356 			},
2357 			.setkey = aead_setkey,
2358 			.setauthsize = aead_setauthsize,
2359 			.encrypt = aead_encrypt,
2360 			.decrypt = aead_decrypt,
2361 			.ivsize = DES_BLOCK_SIZE,
2362 			.maxauthsize = SHA224_DIGEST_SIZE,
2363 		},
2364 		.caam = {
2365 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2366 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2367 					   OP_ALG_AAI_HMAC_PRECOMP,
2368 		},
2369 	},
2370 	{
2371 		.aead = {
2372 			.base = {
2373 				.cra_name = "echainiv(authenc(hmac(sha224),"
2374 					    "cbc(des)))",
2375 				.cra_driver_name = "echainiv-authenc-"
2376 						   "hmac-sha224-cbc-des-"
2377 						   "caam-qi",
2378 				.cra_blocksize = DES_BLOCK_SIZE,
2379 			},
2380 			.setkey = aead_setkey,
2381 			.setauthsize = aead_setauthsize,
2382 			.encrypt = aead_encrypt,
2383 			.decrypt = aead_decrypt,
2384 			.ivsize = DES_BLOCK_SIZE,
2385 			.maxauthsize = SHA224_DIGEST_SIZE,
2386 		},
2387 		.caam = {
2388 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2389 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2390 					   OP_ALG_AAI_HMAC_PRECOMP,
2391 			.geniv = true,
2392 		}
2393 	},
2394 	{
2395 		.aead = {
2396 			.base = {
2397 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2398 				.cra_driver_name = "authenc-hmac-sha256-"
2399 						   "cbc-des-caam-qi",
2400 				.cra_blocksize = DES_BLOCK_SIZE,
2401 			},
2402 			.setkey = aead_setkey,
2403 			.setauthsize = aead_setauthsize,
2404 			.encrypt = aead_encrypt,
2405 			.decrypt = aead_decrypt,
2406 			.ivsize = DES_BLOCK_SIZE,
2407 			.maxauthsize = SHA256_DIGEST_SIZE,
2408 		},
2409 		.caam = {
2410 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2411 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2412 					   OP_ALG_AAI_HMAC_PRECOMP,
2413 		},
2414 	},
2415 	{
2416 		.aead = {
2417 			.base = {
2418 				.cra_name = "echainiv(authenc(hmac(sha256),"
2419 					    "cbc(des)))",
2420 				.cra_driver_name = "echainiv-authenc-"
2421 						   "hmac-sha256-cbc-des-"
2422 						   "caam-qi",
2423 				.cra_blocksize = DES_BLOCK_SIZE,
2424 			},
2425 			.setkey = aead_setkey,
2426 			.setauthsize = aead_setauthsize,
2427 			.encrypt = aead_encrypt,
2428 			.decrypt = aead_decrypt,
2429 			.ivsize = DES_BLOCK_SIZE,
2430 			.maxauthsize = SHA256_DIGEST_SIZE,
2431 		},
2432 		.caam = {
2433 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2434 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2435 					   OP_ALG_AAI_HMAC_PRECOMP,
2436 			.geniv = true,
2437 		},
2438 	},
2439 	{
2440 		.aead = {
2441 			.base = {
2442 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2443 				.cra_driver_name = "authenc-hmac-sha384-"
2444 						   "cbc-des-caam-qi",
2445 				.cra_blocksize = DES_BLOCK_SIZE,
2446 			},
2447 			.setkey = aead_setkey,
2448 			.setauthsize = aead_setauthsize,
2449 			.encrypt = aead_encrypt,
2450 			.decrypt = aead_decrypt,
2451 			.ivsize = DES_BLOCK_SIZE,
2452 			.maxauthsize = SHA384_DIGEST_SIZE,
2453 		},
2454 		.caam = {
2455 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2456 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2457 					   OP_ALG_AAI_HMAC_PRECOMP,
2458 		},
2459 	},
2460 	{
2461 		.aead = {
2462 			.base = {
2463 				.cra_name = "echainiv(authenc(hmac(sha384),"
2464 					    "cbc(des)))",
2465 				.cra_driver_name = "echainiv-authenc-"
2466 						   "hmac-sha384-cbc-des-"
2467 						   "caam-qi",
2468 				.cra_blocksize = DES_BLOCK_SIZE,
2469 			},
2470 			.setkey = aead_setkey,
2471 			.setauthsize = aead_setauthsize,
2472 			.encrypt = aead_encrypt,
2473 			.decrypt = aead_decrypt,
2474 			.ivsize = DES_BLOCK_SIZE,
2475 			.maxauthsize = SHA384_DIGEST_SIZE,
2476 		},
2477 		.caam = {
2478 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2479 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2480 					   OP_ALG_AAI_HMAC_PRECOMP,
2481 			.geniv = true,
2482 		}
2483 	},
2484 	{
2485 		.aead = {
2486 			.base = {
2487 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2488 				.cra_driver_name = "authenc-hmac-sha512-"
2489 						   "cbc-des-caam-qi",
2490 				.cra_blocksize = DES_BLOCK_SIZE,
2491 			},
2492 			.setkey = aead_setkey,
2493 			.setauthsize = aead_setauthsize,
2494 			.encrypt = aead_encrypt,
2495 			.decrypt = aead_decrypt,
2496 			.ivsize = DES_BLOCK_SIZE,
2497 			.maxauthsize = SHA512_DIGEST_SIZE,
2498 		},
2499 		.caam = {
2500 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2501 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2502 					   OP_ALG_AAI_HMAC_PRECOMP,
2503 		}
2504 	},
2505 	{
2506 		.aead = {
2507 			.base = {
2508 				.cra_name = "echainiv(authenc(hmac(sha512),"
2509 					    "cbc(des)))",
2510 				.cra_driver_name = "echainiv-authenc-"
2511 						   "hmac-sha512-cbc-des-"
2512 						   "caam-qi",
2513 				.cra_blocksize = DES_BLOCK_SIZE,
2514 			},
2515 			.setkey = aead_setkey,
2516 			.setauthsize = aead_setauthsize,
2517 			.encrypt = aead_encrypt,
2518 			.decrypt = aead_decrypt,
2519 			.ivsize = DES_BLOCK_SIZE,
2520 			.maxauthsize = SHA512_DIGEST_SIZE,
2521 		},
2522 		.caam = {
2523 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2524 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2525 					   OP_ALG_AAI_HMAC_PRECOMP,
2526 			.geniv = true,
2527 		}
2528 	},
2529 };
2530 
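/*
 * A minimal, illustrative sketch (not part of the original driver): the
 * AEADs in driver_aeads above are reached through the generic kernel
 * crypto API by algorithm name, with this implementation selected via
 * cra_priority. The helper name example_gcm_aes_encrypt and its
 * parameters are hypothetical; error handling is trimmed to the
 * essentials and dst is assumed to have room for the 16-byte tag.
 */
#if 0
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_gcm_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   const u8 *key, unsigned int keylen,
				   u8 iv[12], unsigned int cryptlen)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	/* matches .cra_name = "gcm(aes)" above; caam-qi wins on priority */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* wait synchronously for the asynchronous (CRYPTO_ALG_ASYNC) result */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);	/* no associated data in this sketch */
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}
#endif
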
2531 struct caam_crypto_alg {
2532 	struct list_head entry;
2533 	struct crypto_alg crypto_alg;
2534 	struct caam_alg_entry caam;
2535 };
2536 
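/*
 * Common per-session (tfm) initialization: allocate a job ring for the
 * session, DMA-map the key buffer and clear the per-operation QI driver
 * contexts; the latter are created lazily on first use.
 */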
2537 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2538 			    bool uses_dkp)
2539 {
2540 	struct caam_drv_private *priv;
2541 
2542 	/*
2543 	 * distribute tfms across job rings to ensure in-order
2544 	 * crypto request processing per tfm
2545 	 */
2546 	ctx->jrdev = caam_jr_alloc();
2547 	if (IS_ERR(ctx->jrdev)) {
2548 		pr_err("Job Ring Device allocation for transform failed\n");
2549 		return PTR_ERR(ctx->jrdev);
2550 	}
2551 
2552 	priv = dev_get_drvdata(ctx->jrdev->parent);
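	/*
	 * On Era 6+ devices the DKP protocol derives the split key in place,
	 * inside ctx->key, i.e. the device writes back into the buffer, so
	 * the mapping must be bidirectional in that case.
	 */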
2553 	if (priv->era >= 6 && uses_dkp)
2554 		ctx->dir = DMA_BIDIRECTIONAL;
2555 	else
2556 		ctx->dir = DMA_TO_DEVICE;
2557 
2558 	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2559 				      ctx->dir);
2560 	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2561 		dev_err(ctx->jrdev, "unable to map key\n");
2562 		caam_jr_free(ctx->jrdev);
2563 		return -ENOMEM;
2564 	}
2565 
2566 	/* copy the descriptor header template values */
2567 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2568 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2569 
2570 	ctx->qidev = priv->qidev;
2571 
2572 	spin_lock_init(&ctx->lock);
2573 	ctx->drv_ctx[ENCRYPT] = NULL;
2574 	ctx->drv_ctx[DECRYPT] = NULL;
2575 	ctx->drv_ctx[GIVENCRYPT] = NULL;
2576 
2577 	return 0;
2578 }
2579 
2580 static int caam_cra_init(struct crypto_tfm *tfm)
2581 {
2582 	struct crypto_alg *alg = tfm->__crt_alg;
2583 	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2584 							crypto_alg);
2585 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2586 
2587 	return caam_init_common(ctx, &caam_alg->caam, false);
2588 }
2589 
2590 static int caam_aead_init(struct crypto_aead *tfm)
2591 {
2592 	struct aead_alg *alg = crypto_aead_alg(tfm);
2593 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2594 						      aead);
2595 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2596 
2597 	return caam_init_common(ctx, &caam_alg->caam,
2598 				alg->setkey == aead_setkey);
2599 }
2600 
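/*
 * Tear down what caam_init_common() and subsequent setkey calls created:
 * release the QI driver contexts, unmap the key and free the job ring.
 */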
2601 static void caam_exit_common(struct caam_ctx *ctx)
2602 {
2603 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2604 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2605 	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2606 
2607 	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2608 
2609 	caam_jr_free(ctx->jrdev);
2610 }
2611 
2612 static void caam_cra_exit(struct crypto_tfm *tfm)
2613 {
2614 	caam_exit_common(crypto_tfm_ctx(tfm));
2615 }
2616 
2617 static void caam_aead_exit(struct crypto_aead *tfm)
2618 {
2619 	caam_exit_common(crypto_aead_ctx(tfm));
2620 }
2621 
2622 static struct list_head alg_list;
2623 static void __exit caam_qi_algapi_exit(void)
2624 {
2625 	struct caam_crypto_alg *t_alg, *n;
2626 	int i;
2627 
2628 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2629 		struct caam_aead_alg *t_alg = driver_aeads + i;
2630 
2631 		if (t_alg->registered)
2632 			crypto_unregister_aead(&t_alg->aead);
2633 	}
2634 
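	/* defensive: init may have returned before alg_list was initialized */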
2635 	if (!alg_list.next)
2636 		return;
2637 
2638 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2639 		crypto_unregister_alg(&t_alg->crypto_alg);
2640 		list_del(&t_alg->entry);
2641 		kfree(t_alg);
2642 	}
2643 }
2644 
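/*
 * Allocate and fill a crypto_alg from an ablkcipher/givcipher template,
 * setting the fields common to all CAAM-QI algorithms.
 */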
2645 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2646 					      *template)
2647 {
2648 	struct caam_crypto_alg *t_alg;
2649 	struct crypto_alg *alg;
2650 
2651 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2652 	if (!t_alg)
2653 		return ERR_PTR(-ENOMEM);
2654 
2655 	alg = &t_alg->crypto_alg;
2656 
2657 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2658 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2659 		 template->driver_name);
2660 	alg->cra_module = THIS_MODULE;
2661 	alg->cra_init = caam_cra_init;
2662 	alg->cra_exit = caam_cra_exit;
2663 	alg->cra_priority = CAAM_CRA_PRIORITY;
2664 	alg->cra_blocksize = template->blocksize;
2665 	alg->cra_alignmask = 0;
2666 	alg->cra_ctxsize = sizeof(struct caam_ctx);
2667 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2668 			 template->type;
2669 	switch (template->type) {
2670 	case CRYPTO_ALG_TYPE_GIVCIPHER:
2671 		alg->cra_type = &crypto_givcipher_type;
2672 		alg->cra_ablkcipher = template->template_ablkcipher;
2673 		break;
2674 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2675 		alg->cra_type = &crypto_ablkcipher_type;
2676 		alg->cra_ablkcipher = template->template_ablkcipher;
2677 		break;
2678 	}
2679 
2680 	t_alg->caam.class1_alg_type = template->class1_alg_type;
2681 	t_alg->caam.class2_alg_type = template->class2_alg_type;
2682 
2683 	return t_alg;
2684 }
2685 
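/* Fill in the aead_alg fields common to all CAAM-QI AEAD algorithms */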
2686 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2687 {
2688 	struct aead_alg *alg = &t_alg->aead;
2689 
2690 	alg->base.cra_module = THIS_MODULE;
2691 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2692 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2693 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2694 
2695 	alg->init = caam_aead_init;
2696 	alg->exit = caam_aead_exit;
2697 }
2698 
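/*
 * Module init: locate the CAAM controller, verify the QI backend is
 * available, then register only those algorithms the detected DES/AES/MD
 * hardware blocks can actually run.
 */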
2699 static int __init caam_qi_algapi_init(void)
2700 {
2701 	struct device_node *dev_node;
2702 	struct platform_device *pdev;
2703 	struct device *ctrldev;
2704 	struct caam_drv_private *priv;
2705 	int i = 0, err = 0;
2706 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2707 	unsigned int md_limit = SHA512_DIGEST_SIZE;
2708 	bool registered = false;
2709 
2710 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2711 	if (!dev_node) {
2712 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2713 		if (!dev_node)
2714 			return -ENODEV;
2715 	}
2716 
2717 	pdev = of_find_device_by_node(dev_node);
2718 	of_node_put(dev_node);
2719 	if (!pdev)
2720 		return -ENODEV;
2721 
2722 	ctrldev = &pdev->dev;
2723 	priv = dev_get_drvdata(ctrldev);
2724 
2725 	/*
2726 	 * If priv is NULL, it's probably because the caam driver wasn't
2727 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2728 	 */
2729 	if (!priv || !priv->qi_present)
2730 		return -ENODEV;
2731 
2732 	if (caam_dpaa2) {
2733 		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2734 		return -ENODEV;
2735 	}
2736 
2737 	INIT_LIST_HEAD(&alg_list);
2738 
2739 	/*
2740 	 * Register crypto algorithms the device supports.
2741 	 * First, detect presence and attributes of DES, AES, and MD blocks.
2742 	 */
2743 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2744 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2745 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2746 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2747 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2748 
2749 	/* If a low-power MD block (LP256) is present, limit digest size to SHA-256 */
2750 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2751 		md_limit = SHA256_DIGEST_SIZE;
2752 
2753 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2754 		struct caam_crypto_alg *t_alg;
2755 		struct caam_alg_template *alg = driver_algs + i;
2756 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2757 
2758 		/* Skip DES algorithms if not supported by device */
2759 		if (!des_inst &&
2760 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2761 		     (alg_sel == OP_ALG_ALGSEL_DES)))
2762 			continue;
2763 
2764 		/* Skip AES algorithms if not supported by device */
2765 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2766 			continue;
2767 
2768 		t_alg = caam_alg_alloc(alg);
2769 		if (IS_ERR(t_alg)) {
2770 			err = PTR_ERR(t_alg);
2771 			dev_warn(priv->qidev, "%s alg allocation failed\n",
2772 				 alg->driver_name);
2773 			continue;
2774 		}
2775 
2776 		err = crypto_register_alg(&t_alg->crypto_alg);
2777 		if (err) {
2778 			dev_warn(priv->qidev, "%s alg registration failed\n",
2779 				 t_alg->crypto_alg.cra_driver_name);
2780 			kfree(t_alg);
2781 			continue;
2782 		}
2783 
2784 		list_add_tail(&t_alg->entry, &alg_list);
2785 		registered = true;
2786 	}
2787 
2788 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2789 		struct caam_aead_alg *t_alg = driver_aeads + i;
2790 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2791 				 OP_ALG_ALGSEL_MASK;
2792 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2793 				 OP_ALG_ALGSEL_MASK;
2794 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2795 
2796 		/* Skip DES algorithms if not supported by device */
2797 		if (!des_inst &&
2798 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2799 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2800 			continue;
2801 
2802 		/* Skip AES algorithms if not supported by device */
2803 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2804 			continue;
2805 
2806 		/*
2807 		 * Check support for AES algorithms not available
2808 		 * on LP devices.
2809 		 */
2810 		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
2811 		    (alg_aai == OP_ALG_AAI_GCM))
2812 			continue;
2813 
2814 		/*
2815 		 * Skip algorithms requiring message digests
2816 		 * if MD or MD size is not supported by device.
2817 		 */
2818 		if (c2_alg_sel &&
2819 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2820 			continue;
2821 
2822 		caam_aead_alg_init(t_alg);
2823 
2824 		err = crypto_register_aead(&t_alg->aead);
2825 		if (err) {
2826 			pr_warn("%s alg registration failed\n",
2827 				t_alg->aead.base.cra_driver_name);
2828 			continue;
2829 		}
2830 
2831 		t_alg->registered = true;
2832 		registered = true;
2833 	}
2834 
2835 	if (registered)
2836 		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
2837 
2838 	return err;
2839 }
2840 
2841 module_init(caam_qi_algapi_init);
2842 module_exit(caam_qi_algapi_exit);
2843 
2844 MODULE_LICENSE("GPL");
2845 MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2846 MODULE_AUTHOR("Freescale Semiconductor");
2847