// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale CAAM support for crypto API over the QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2018 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is the sum of AES_MAX_KEY_SIZE and the max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
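/*
 * Worked example (assuming CAAM_CMD_SZ == 4, i.e. one 32-bit word per
 * descriptor command): AES_MAX_KEY_SIZE = 32 and SHA512_DIGEST_SIZE = 64
 * give CAAM_MAX_KEY_SIZE = 32 + 128 = 160 bytes, so DESC_MAX_USED_LEN is
 * (DESC_QI_AEAD_GIVENC_LEN + 160) / 4 words - the worst case of a givenc
 * shared descriptor with both keys inlined.
 */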

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

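/*
 * Construct the shared descriptors for an authenc AEAD session: encrypt,
 * decrypt and - for the geniv case - givencrypt. Depending on how much room
 * desc_inline_query() reports, the split authentication key and the
 * encryption key are either inlined into the descriptor or referenced
 * through their DMA address.
 */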
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

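	/*
	 * desc_inline_query() reports in inl_mask which keys fit inside the
	 * descriptor: bit 0 covers data_len[0] (the split authentication
	 * key), bit 1 covers data_len[1] (the encryption key). Inlined keys
	 * are passed by virtual address, the rest by DMA address.
	 */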
	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

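/*
 * Set the authenc key. ctx->key holds the (padded) authentication split key
 * followed by the encryption key. On Era 6+ parts the split key is generated
 * by a DKP instruction inside the shared descriptor itself, so the raw
 * authentication key is stored instead of running gen_split_key().
 */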
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

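/*
 * For GCM (and the rfc4106/rfc4543 variants below), rem_bytes is the room
 * left in the 64-word descriptor buffer once the job descriptor and the key
 * have been accounted for; the key is inlined only if the whole shared
 * descriptor still fits.
 */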
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(skcipher, key)) ?:
	       skcipher_setkey(skcipher, key, keylen);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

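/*
 * Return the driver context for the requested operation, creating it on
 * first use. Creation is guarded by ctx->lock with a re-check after taking
 * the lock, so concurrent callers initialize it exactly once.
 */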
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

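/* Undo all DMA mappings set up for a request by the edesc_alloc routines */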
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * Verify that the h/w authentication check passed;
		 * otherwise return -EBADMSG.
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
						(encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}
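
/*
 * The extended descriptor built above yields a compound frame: fd_sgt[1]
 * describes the input (assoclen word, optional IV, then req->src through
 * the qm_sg table) and fd_sgt[0] the output (req->dst, or req->src past
 * the assoclen/IV entries for in-place operation).
 */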

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, status);
}

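/*
 * Allocate and map the skcipher extended descriptor. The qm_sg table layout
 * is [IV, req->src (, req->dst)]; the IV is copied into the edesc so that it
 * lives in DMAable memory.
 */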
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->cryptlen, 0);
	}

	return edesc;
}

static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
					 ivsize, ivsize, 0);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

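/* skcipher algorithm templates exposed through the crypto API */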
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
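
/*
 * Minimal usage sketch (illustration only, not part of this driver):
 * consumers reach these algorithms through the generic crypto API; the
 * "-caam-qi" implementations are selected by priority when available.
 * example_cbc_aes_encrypt() is a hypothetical helper with abbreviated
 * error handling.
 */
#if 0
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, struct scatterlist *sg,
				   unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	/* enqueue to CAAM and wait for the QI completion callback */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return err;
}
#endif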

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
1957 			.encrypt = aead_encrypt,
1958 			.decrypt = aead_decrypt,
1959 			.ivsize = DES3_EDE_BLOCK_SIZE,
1960 			.maxauthsize = SHA224_DIGEST_SIZE,
1961 		},
1962 		.caam = {
1963 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1964 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1965 					   OP_ALG_AAI_HMAC_PRECOMP,
1966 			.geniv = true,
1967 		}
1968 	},
1969 	{
1970 		.aead = {
1971 			.base = {
1972 				.cra_name = "authenc(hmac(sha256),"
1973 					    "cbc(des3_ede))",
1974 				.cra_driver_name = "authenc-hmac-sha256-"
1975 						   "cbc-des3_ede-caam-qi",
1976 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1977 			},
1978 			.setkey = des3_aead_setkey,
1979 			.setauthsize = aead_setauthsize,
1980 			.encrypt = aead_encrypt,
1981 			.decrypt = aead_decrypt,
1982 			.ivsize = DES3_EDE_BLOCK_SIZE,
1983 			.maxauthsize = SHA256_DIGEST_SIZE,
1984 		},
1985 		.caam = {
1986 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1987 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1988 					   OP_ALG_AAI_HMAC_PRECOMP,
1989 		},
1990 	},
1991 	{
1992 		.aead = {
1993 			.base = {
1994 				.cra_name = "echainiv(authenc(hmac(sha256),"
1995 					    "cbc(des3_ede)))",
1996 				.cra_driver_name = "echainiv-authenc-"
1997 						   "hmac-sha256-"
1998 						   "cbc-des3_ede-caam-qi",
1999 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2000 			},
2001 			.setkey = des3_aead_setkey,
2002 			.setauthsize = aead_setauthsize,
2003 			.encrypt = aead_encrypt,
2004 			.decrypt = aead_decrypt,
2005 			.ivsize = DES3_EDE_BLOCK_SIZE,
2006 			.maxauthsize = SHA256_DIGEST_SIZE,
2007 		},
2008 		.caam = {
2009 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2010 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2011 					   OP_ALG_AAI_HMAC_PRECOMP,
2012 			.geniv = true,
2013 		}
2014 	},
2015 	{
2016 		.aead = {
2017 			.base = {
2018 				.cra_name = "authenc(hmac(sha384),"
2019 					    "cbc(des3_ede))",
2020 				.cra_driver_name = "authenc-hmac-sha384-"
2021 						   "cbc-des3_ede-caam-qi",
2022 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2023 			},
2024 			.setkey = des3_aead_setkey,
2025 			.setauthsize = aead_setauthsize,
2026 			.encrypt = aead_encrypt,
2027 			.decrypt = aead_decrypt,
2028 			.ivsize = DES3_EDE_BLOCK_SIZE,
2029 			.maxauthsize = SHA384_DIGEST_SIZE,
2030 		},
2031 		.caam = {
2032 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2033 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2034 					   OP_ALG_AAI_HMAC_PRECOMP,
2035 		},
2036 	},
2037 	{
2038 		.aead = {
2039 			.base = {
2040 				.cra_name = "echainiv(authenc(hmac(sha384),"
2041 					    "cbc(des3_ede)))",
2042 				.cra_driver_name = "echainiv-authenc-"
2043 						   "hmac-sha384-"
2044 						   "cbc-des3_ede-caam-qi",
2045 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2046 			},
2047 			.setkey = des3_aead_setkey,
2048 			.setauthsize = aead_setauthsize,
2049 			.encrypt = aead_encrypt,
2050 			.decrypt = aead_decrypt,
2051 			.ivsize = DES3_EDE_BLOCK_SIZE,
2052 			.maxauthsize = SHA384_DIGEST_SIZE,
2053 		},
2054 		.caam = {
2055 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2056 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2057 					   OP_ALG_AAI_HMAC_PRECOMP,
2058 			.geniv = true,
2059 		}
2060 	},
2061 	{
2062 		.aead = {
2063 			.base = {
2064 				.cra_name = "authenc(hmac(sha512),"
2065 					    "cbc(des3_ede))",
2066 				.cra_driver_name = "authenc-hmac-sha512-"
2067 						   "cbc-des3_ede-caam-qi",
2068 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2069 			},
2070 			.setkey = des3_aead_setkey,
2071 			.setauthsize = aead_setauthsize,
2072 			.encrypt = aead_encrypt,
2073 			.decrypt = aead_decrypt,
2074 			.ivsize = DES3_EDE_BLOCK_SIZE,
2075 			.maxauthsize = SHA512_DIGEST_SIZE,
2076 		},
2077 		.caam = {
2078 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2079 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2080 					   OP_ALG_AAI_HMAC_PRECOMP,
2081 		},
2082 	},
2083 	{
2084 		.aead = {
2085 			.base = {
2086 				.cra_name = "echainiv(authenc(hmac(sha512),"
2087 					    "cbc(des3_ede)))",
2088 				.cra_driver_name = "echainiv-authenc-"
2089 						   "hmac-sha512-"
2090 						   "cbc-des3_ede-caam-qi",
2091 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2092 			},
2093 			.setkey = des3_aead_setkey,
2094 			.setauthsize = aead_setauthsize,
2095 			.encrypt = aead_encrypt,
2096 			.decrypt = aead_decrypt,
2097 			.ivsize = DES3_EDE_BLOCK_SIZE,
2098 			.maxauthsize = SHA512_DIGEST_SIZE,
2099 		},
2100 		.caam = {
2101 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2102 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2103 					   OP_ALG_AAI_HMAC_PRECOMP,
2104 			.geniv = true,
2105 		}
2106 	},
2107 	{
2108 		.aead = {
2109 			.base = {
2110 				.cra_name = "authenc(hmac(md5),cbc(des))",
2111 				.cra_driver_name = "authenc-hmac-md5-"
2112 						   "cbc-des-caam-qi",
2113 				.cra_blocksize = DES_BLOCK_SIZE,
2114 			},
2115 			.setkey = aead_setkey,
2116 			.setauthsize = aead_setauthsize,
2117 			.encrypt = aead_encrypt,
2118 			.decrypt = aead_decrypt,
2119 			.ivsize = DES_BLOCK_SIZE,
2120 			.maxauthsize = MD5_DIGEST_SIZE,
2121 		},
2122 		.caam = {
2123 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2124 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2125 					   OP_ALG_AAI_HMAC_PRECOMP,
2126 		},
2127 	},
2128 	{
2129 		.aead = {
2130 			.base = {
2131 				.cra_name = "echainiv(authenc(hmac(md5),"
2132 					    "cbc(des)))",
2133 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2134 						   "cbc-des-caam-qi",
2135 				.cra_blocksize = DES_BLOCK_SIZE,
2136 			},
2137 			.setkey = aead_setkey,
2138 			.setauthsize = aead_setauthsize,
2139 			.encrypt = aead_encrypt,
2140 			.decrypt = aead_decrypt,
2141 			.ivsize = DES_BLOCK_SIZE,
2142 			.maxauthsize = MD5_DIGEST_SIZE,
2143 		},
2144 		.caam = {
2145 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2146 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2147 					   OP_ALG_AAI_HMAC_PRECOMP,
2148 			.geniv = true,
2149 		}
2150 	},
2151 	{
2152 		.aead = {
2153 			.base = {
2154 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2155 				.cra_driver_name = "authenc-hmac-sha1-"
2156 						   "cbc-des-caam-qi",
2157 				.cra_blocksize = DES_BLOCK_SIZE,
2158 			},
2159 			.setkey = aead_setkey,
2160 			.setauthsize = aead_setauthsize,
2161 			.encrypt = aead_encrypt,
2162 			.decrypt = aead_decrypt,
2163 			.ivsize = DES_BLOCK_SIZE,
2164 			.maxauthsize = SHA1_DIGEST_SIZE,
2165 		},
2166 		.caam = {
2167 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2168 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2169 					   OP_ALG_AAI_HMAC_PRECOMP,
2170 		},
2171 	},
2172 	{
2173 		.aead = {
2174 			.base = {
2175 				.cra_name = "echainiv(authenc(hmac(sha1),"
2176 					    "cbc(des)))",
2177 				.cra_driver_name = "echainiv-authenc-"
2178 						   "hmac-sha1-cbc-des-caam-qi",
2179 				.cra_blocksize = DES_BLOCK_SIZE,
2180 			},
2181 			.setkey = aead_setkey,
2182 			.setauthsize = aead_setauthsize,
2183 			.encrypt = aead_encrypt,
2184 			.decrypt = aead_decrypt,
2185 			.ivsize = DES_BLOCK_SIZE,
2186 			.maxauthsize = SHA1_DIGEST_SIZE,
2187 		},
2188 		.caam = {
2189 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2190 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2191 					   OP_ALG_AAI_HMAC_PRECOMP,
2192 			.geniv = true,
2193 		}
2194 	},
2195 	{
2196 		.aead = {
2197 			.base = {
2198 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2199 				.cra_driver_name = "authenc-hmac-sha224-"
2200 						   "cbc-des-caam-qi",
2201 				.cra_blocksize = DES_BLOCK_SIZE,
2202 			},
2203 			.setkey = aead_setkey,
2204 			.setauthsize = aead_setauthsize,
2205 			.encrypt = aead_encrypt,
2206 			.decrypt = aead_decrypt,
2207 			.ivsize = DES_BLOCK_SIZE,
2208 			.maxauthsize = SHA224_DIGEST_SIZE,
2209 		},
2210 		.caam = {
2211 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2212 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2213 					   OP_ALG_AAI_HMAC_PRECOMP,
2214 		},
2215 	},
2216 	{
2217 		.aead = {
2218 			.base = {
2219 				.cra_name = "echainiv(authenc(hmac(sha224),"
2220 					    "cbc(des)))",
2221 				.cra_driver_name = "echainiv-authenc-"
2222 						   "hmac-sha224-cbc-des-"
2223 						   "caam-qi",
2224 				.cra_blocksize = DES_BLOCK_SIZE,
2225 			},
2226 			.setkey = aead_setkey,
2227 			.setauthsize = aead_setauthsize,
2228 			.encrypt = aead_encrypt,
2229 			.decrypt = aead_decrypt,
2230 			.ivsize = DES_BLOCK_SIZE,
2231 			.maxauthsize = SHA224_DIGEST_SIZE,
2232 		},
2233 		.caam = {
2234 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2235 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2236 					   OP_ALG_AAI_HMAC_PRECOMP,
2237 			.geniv = true,
2238 		}
2239 	},
2240 	{
2241 		.aead = {
2242 			.base = {
2243 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2244 				.cra_driver_name = "authenc-hmac-sha256-"
2245 						   "cbc-des-caam-qi",
2246 				.cra_blocksize = DES_BLOCK_SIZE,
2247 			},
2248 			.setkey = aead_setkey,
2249 			.setauthsize = aead_setauthsize,
2250 			.encrypt = aead_encrypt,
2251 			.decrypt = aead_decrypt,
2252 			.ivsize = DES_BLOCK_SIZE,
2253 			.maxauthsize = SHA256_DIGEST_SIZE,
2254 		},
2255 		.caam = {
2256 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2257 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2258 					   OP_ALG_AAI_HMAC_PRECOMP,
2259 		},
2260 	},
2261 	{
2262 		.aead = {
2263 			.base = {
2264 				.cra_name = "echainiv(authenc(hmac(sha256),"
2265 					    "cbc(des)))",
2266 				.cra_driver_name = "echainiv-authenc-"
2267 						   "hmac-sha256-cbc-des-"
2268 						   "caam-qi",
2269 				.cra_blocksize = DES_BLOCK_SIZE,
2270 			},
2271 			.setkey = aead_setkey,
2272 			.setauthsize = aead_setauthsize,
2273 			.encrypt = aead_encrypt,
2274 			.decrypt = aead_decrypt,
2275 			.ivsize = DES_BLOCK_SIZE,
2276 			.maxauthsize = SHA256_DIGEST_SIZE,
2277 		},
2278 		.caam = {
2279 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2280 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2281 					   OP_ALG_AAI_HMAC_PRECOMP,
2282 			.geniv = true,
2283 		},
2284 	},
2285 	{
2286 		.aead = {
2287 			.base = {
2288 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2289 				.cra_driver_name = "authenc-hmac-sha384-"
2290 						   "cbc-des-caam-qi",
2291 				.cra_blocksize = DES_BLOCK_SIZE,
2292 			},
2293 			.setkey = aead_setkey,
2294 			.setauthsize = aead_setauthsize,
2295 			.encrypt = aead_encrypt,
2296 			.decrypt = aead_decrypt,
2297 			.ivsize = DES_BLOCK_SIZE,
2298 			.maxauthsize = SHA384_DIGEST_SIZE,
2299 		},
2300 		.caam = {
2301 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2302 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2303 					   OP_ALG_AAI_HMAC_PRECOMP,
2304 		},
2305 	},
2306 	{
2307 		.aead = {
2308 			.base = {
2309 				.cra_name = "echainiv(authenc(hmac(sha384),"
2310 					    "cbc(des)))",
2311 				.cra_driver_name = "echainiv-authenc-"
2312 						   "hmac-sha384-cbc-des-"
2313 						   "caam-qi",
2314 				.cra_blocksize = DES_BLOCK_SIZE,
2315 			},
2316 			.setkey = aead_setkey,
2317 			.setauthsize = aead_setauthsize,
2318 			.encrypt = aead_encrypt,
2319 			.decrypt = aead_decrypt,
2320 			.ivsize = DES_BLOCK_SIZE,
2321 			.maxauthsize = SHA384_DIGEST_SIZE,
2322 		},
2323 		.caam = {
2324 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2325 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2326 					   OP_ALG_AAI_HMAC_PRECOMP,
2327 			.geniv = true,
2328 		}
2329 	},
2330 	{
2331 		.aead = {
2332 			.base = {
2333 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2334 				.cra_driver_name = "authenc-hmac-sha512-"
2335 						   "cbc-des-caam-qi",
2336 				.cra_blocksize = DES_BLOCK_SIZE,
2337 			},
2338 			.setkey = aead_setkey,
2339 			.setauthsize = aead_setauthsize,
2340 			.encrypt = aead_encrypt,
2341 			.decrypt = aead_decrypt,
2342 			.ivsize = DES_BLOCK_SIZE,
2343 			.maxauthsize = SHA512_DIGEST_SIZE,
2344 		},
2345 		.caam = {
2346 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2347 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2348 					   OP_ALG_AAI_HMAC_PRECOMP,
2349 		}
2350 	},
2351 	{
2352 		.aead = {
2353 			.base = {
2354 				.cra_name = "echainiv(authenc(hmac(sha512),"
2355 					    "cbc(des)))",
2356 				.cra_driver_name = "echainiv-authenc-"
2357 						   "hmac-sha512-cbc-des-"
2358 						   "caam-qi",
2359 				.cra_blocksize = DES_BLOCK_SIZE,
2360 			},
2361 			.setkey = aead_setkey,
2362 			.setauthsize = aead_setauthsize,
2363 			.encrypt = aead_encrypt,
2364 			.decrypt = aead_decrypt,
2365 			.ivsize = DES_BLOCK_SIZE,
2366 			.maxauthsize = SHA512_DIGEST_SIZE,
2367 		},
2368 		.caam = {
2369 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2370 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2371 					   OP_ALG_AAI_HMAC_PRECOMP,
2372 			.geniv = true,
2373 		}
2374 	},
2375 };
2376 
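/*
 * Illustrative sketch (not part of this driver): a kernel consumer reaches
 * the AEADs registered above through the generic crypto API by cra_name;
 * the crypto core selects this backend based on CAAM_CRA_PRIORITY.
 * Roughly, with error handling elided, src_sg/dst_sg/iv caller-provided,
 * and the key already packed in the rtattr-based authenc format:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */
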
2377 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2378 			    bool uses_dkp)
2379 {
2380 	struct caam_drv_private *priv;
2381 
2382 	/*
2383 	 * distribute tfms across job rings to ensure in-order
2384 	 * crypto request processing per tfm
2385 	 */
2386 	ctx->jrdev = caam_jr_alloc();
2387 	if (IS_ERR(ctx->jrdev)) {
2388 		pr_err("Job Ring Device allocation for transform failed\n");
2389 		return PTR_ERR(ctx->jrdev);
2390 	}
2391 
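	/*
	 * Era 6+ parts generate the authentication split key in-device via
	 * the Derived Key Protocol (DKP), which writes the derived key back
	 * into ctx->key; such contexts need the key buffer mapped
	 * bidirectionally instead of to-device only.
	 */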
2392 	priv = dev_get_drvdata(ctx->jrdev->parent);
2393 	if (priv->era >= 6 && uses_dkp)
2394 		ctx->dir = DMA_BIDIRECTIONAL;
2395 	else
2396 		ctx->dir = DMA_TO_DEVICE;
2397 
2398 	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2399 				      ctx->dir);
2400 	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2401 		dev_err(ctx->jrdev, "unable to map key\n");
2402 		caam_jr_free(ctx->jrdev);
2403 		return -ENOMEM;
2404 	}
2405 
2406 	/* copy descriptor header template value */
2407 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2408 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2409 
2410 	ctx->qidev = priv->qidev;
2411 
2412 	spin_lock_init(&ctx->lock);
2413 	ctx->drv_ctx[ENCRYPT] = NULL;
2414 	ctx->drv_ctx[DECRYPT] = NULL;
2415 
2416 	return 0;
2417 }
2418 
2419 static int caam_cra_init(struct crypto_skcipher *tfm)
2420 {
2421 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
2422 	struct caam_skcipher_alg *caam_alg =
2423 		container_of(alg, typeof(*caam_alg), skcipher);
2424 
2425 	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
2426 				false);
2427 }
2428 
2429 static int caam_aead_init(struct crypto_aead *tfm)
2430 {
2431 	struct aead_alg *alg = crypto_aead_alg(tfm);
2432 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2433 						      aead);
2434 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2435 
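	/*
	 * Transforms wired to the generic aead_setkey() derive their split
	 * key via DKP, so tell caam_init_common() to map ctx->key
	 * bidirectionally for them.
	 */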
2436 	return caam_init_common(ctx, &caam_alg->caam,
2437 				alg->setkey == aead_setkey);
2438 }
2439 
2440 static void caam_exit_common(struct caam_ctx *ctx)
2441 {
2442 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2443 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2444 
2445 	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2446 
2447 	caam_jr_free(ctx->jrdev);
2448 }
2449 
2450 static void caam_cra_exit(struct crypto_skcipher *tfm)
2451 {
2452 	caam_exit_common(crypto_skcipher_ctx(tfm));
2453 }
2454 
2455 static void caam_aead_exit(struct crypto_aead *tfm)
2456 {
2457 	caam_exit_common(crypto_aead_ctx(tfm));
2458 }
2459 
2460 static void __exit caam_qi_algapi_exit(void)
2461 {
2462 	int i;
2463 
2464 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2465 		struct caam_aead_alg *t_alg = driver_aeads + i;
2466 
2467 		if (t_alg->registered)
2468 			crypto_unregister_aead(&t_alg->aead);
2469 	}
2470 
2471 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2472 		struct caam_skcipher_alg *t_alg = driver_algs + i;
2473 
2474 		if (t_alg->registered)
2475 			crypto_unregister_skcipher(&t_alg->skcipher);
2476 	}
2477 }
2478 
2479 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2480 {
2481 	struct skcipher_alg *alg = &t_alg->skcipher;
2482 
2483 	alg->base.cra_module = THIS_MODULE;
2484 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2485 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2486 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2487 
2488 	alg->init = caam_cra_init;
2489 	alg->exit = caam_cra_exit;
2490 }
2491 
2492 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2493 {
2494 	struct aead_alg *alg = &t_alg->aead;
2495 
2496 	alg->base.cra_module = THIS_MODULE;
2497 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2498 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2499 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2500 
2501 	alg->init = caam_aead_init;
2502 	alg->exit = caam_aead_exit;
2503 }
2504 
2505 static int __init caam_qi_algapi_init(void)
2506 {
2507 	struct device_node *dev_node;
2508 	struct platform_device *pdev;
2509 	struct device *ctrldev;
2510 	struct caam_drv_private *priv;
2511 	int i = 0, err = 0;
2512 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
2513 	unsigned int md_limit = SHA512_DIGEST_SIZE;
2514 	bool registered = false;
2515 
2516 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2517 	if (!dev_node) {
2518 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2519 		if (!dev_node)
2520 			return -ENODEV;
2521 	}
2522 
2523 	pdev = of_find_device_by_node(dev_node);
2524 	of_node_put(dev_node);
2525 	if (!pdev)
2526 		return -ENODEV;
2527 
2528 	ctrldev = &pdev->dev;
2529 	priv = dev_get_drvdata(ctrldev);
2530 
2531 	/*
2532 	 * If priv is NULL, it's probably because the caam driver wasn't
2533 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2534 	 */
2535 	if (!priv || !priv->qi_present) {
2536 		err = -ENODEV;
2537 		goto out_put_dev;
2538 	}
2539 
2540 	if (caam_dpaa2) {
2541 		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2542 		err = -ENODEV;
2543 		goto out_put_dev;
2544 	}
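	/* (DPAA 2.x parts are served by the separate caamalg_qi2 frontend.) */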
2545 
2546 	/*
2547 	 * Register crypto algorithms the device supports.
2548 	 * First, detect presence and attributes of DES, AES, and MD blocks.
2549 	 */
2550 	if (priv->era < 10) {
2551 		u32 cha_vid, cha_inst;
2552 
2553 		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2554 		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2555 		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2556 
2557 		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2558 		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2559 			   CHA_ID_LS_DES_SHIFT;
2560 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2561 		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2562 	} else {
2563 		u32 aesa, mdha;
2564 
2565 		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2566 		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2567 
2568 		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2569 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2570 
2571 		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2572 		aes_inst = aesa & CHA_VER_NUM_MASK;
2573 		md_inst = mdha & CHA_VER_NUM_MASK;
2574 	}
2575 
2576 	/* If MD is present, limit digest size based on LP256 */
2577 	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2578 		md_limit = SHA256_DIGEST_SIZE;
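
	/*
	 * Concretely: with an LP256 MD block, md_limit drops to
	 * SHA256_DIGEST_SIZE, so the hmac(sha384)/hmac(sha512) templates
	 * above fail the maxauthsize check below and are not registered.
	 */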
2579 
2580 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2581 		struct caam_skcipher_alg *t_alg = driver_algs + i;
2582 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
2583 
2584 		/* Skip DES algorithms if not supported by device */
2585 		if (!des_inst &&
2586 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2587 		     (alg_sel == OP_ALG_ALGSEL_DES)))
2588 			continue;
2589 
2590 		/* Skip AES algorithms if not supported by device */
2591 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2592 			continue;
2593 
2594 		caam_skcipher_alg_init(t_alg);
2595 
2596 		err = crypto_register_skcipher(&t_alg->skcipher);
2597 		if (err) {
2598 			dev_warn(priv->qidev, "%s alg registration failed\n",
2599 				 t_alg->skcipher.base.cra_driver_name);
2600 			continue;
2601 		}
2602 
2603 		t_alg->registered = true;
2604 		registered = true;
2605 	}
2606 
2607 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2608 		struct caam_aead_alg *t_alg = driver_aeads + i;
2609 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2610 				 OP_ALG_ALGSEL_MASK;
2611 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2612 				 OP_ALG_ALGSEL_MASK;
2613 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2614 
2615 		/* Skip DES algorithms if not supported by device */
2616 		if (!des_inst &&
2617 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2618 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2619 			continue;
2620 
2621 		/* Skip AES algorithms if not supported by device */
2622 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2623 			continue;
2624 
2625 		/*
2626 		 * Skip AES modes that low-power (LP) AES blocks do not
2627 		 * support, e.g. GCM.
2628 		 */
2629 		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2630 			continue;
2631 
2632 		/*
2633 		 * Skip algorithms requiring message digests if the MD block
2634 		 * is missing or the digest size exceeds what it supports.
2635 		 */
2636 		if (c2_alg_sel &&
2637 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2638 			continue;
2639 
2640 		caam_aead_alg_init(t_alg);
2641 
2642 		err = crypto_register_aead(&t_alg->aead);
2643 		if (err) {
2644 			pr_warn("%s alg registration failed\n",
2645 				t_alg->aead.base.cra_driver_name);
2646 			continue;
2647 		}
2648 
2649 		t_alg->registered = true;
2650 		registered = true;
2651 	}
2652 
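	/* e.g. "grep -B1 -A3 caam-qi /proc/crypto" should then list them */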
2653 	if (registered)
2654 		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
2655 
2656 out_put_dev:
2657 	put_device(ctrldev);
2658 	return err;
2659 }
2660 
2661 module_init(caam_qi_algapi_init);
2662 module_exit(caam_qi_algapi_exit);
2663 
2664 MODULE_LICENSE("GPL");
2665 MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2666 MODULE_AUTHOR("Freescale Semiconductor");
2667