1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2018 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include <linux/fsl/mc.h>
19 #include <soc/fsl/dpaa2-io.h>
20 #include <soc/fsl/dpaa2-fd.h>
21 
22 #define CAAM_CRA_PRIORITY	2000
23 
24 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
25 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 				 SHA512_DIGEST_SIZE * 2)
27 
28 /*
29  * This is a a cache of buffers, from which the users of CAAM QI driver
30  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
31  * NOTE: A more elegant solution would be to have some headroom in the frames
32  *       being processed. This can be added by the dpaa2-eth driver. This would
33  *       pose a problem for userspace application processing which cannot
34  *       know of this limitation. So for now, this will work.
35  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
36  */
37 static struct kmem_cache *qi_cache;
38 
39 struct caam_alg_entry {
40 	struct device *dev;
41 	int class1_alg_type;
42 	int class2_alg_type;
43 	bool rfc3686;
44 	bool geniv;
45 };
46 
47 struct caam_aead_alg {
48 	struct aead_alg aead;
49 	struct caam_alg_entry caam;
50 	bool registered;
51 };
52 
53 struct caam_skcipher_alg {
54 	struct skcipher_alg skcipher;
55 	struct caam_alg_entry caam;
56 	bool registered;
57 };
58 
59 /**
60  * caam_ctx - per-session context
61  * @flc: Flow Contexts array
62  * @key:  [authentication key], encryption key
63  * @flc_dma: I/O virtual addresses of the Flow Contexts
64  * @key_dma: I/O virtual address of the key
65  * @dir: DMA direction for mapping key and Flow Contexts
66  * @dev: dpseci device
67  * @adata: authentication algorithm details
68  * @cdata: encryption algorithm details
69  * @authsize: authentication tag (a.k.a. ICV / MAC) size
70  */
71 struct caam_ctx {
72 	struct caam_flc flc[NUM_OP];
73 	u8 key[CAAM_MAX_KEY_SIZE];
74 	dma_addr_t flc_dma[NUM_OP];
75 	dma_addr_t key_dma;
76 	enum dma_data_direction dir;
77 	struct device *dev;
78 	struct alginfo adata;
79 	struct alginfo cdata;
80 	unsigned int authsize;
81 };
82 
83 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
84 				     dma_addr_t iova_addr)
85 {
86 	phys_addr_t phys_addr;
87 
88 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
89 				   iova_addr;
90 
91 	return phys_to_virt(phys_addr);
92 }
93 
94 /*
95  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
96  *
97  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
98  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
99  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
100  * hosting 16 SG entries.
101  *
102  * @flags - flags that would be used for the equivalent kmalloc(..) call
103  *
104  * Returns a pointer to a retrieved buffer on success or NULL on failure.
105  */
106 static inline void *qi_cache_zalloc(gfp_t flags)
107 {
108 	return kmem_cache_zalloc(qi_cache, flags);
109 }
110 
111 /*
112  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
113  *
114  * @obj - buffer previously allocated by qi_cache_zalloc
115  *
116  * No checking is being done, the call is a passthrough call to
117  * kmem_cache_free(...)
118  */
119 static inline void qi_cache_free(void *obj)
120 {
121 	kmem_cache_free(qi_cache, obj);
122 }
123 
124 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
125 {
126 	switch (crypto_tfm_alg_type(areq->tfm)) {
127 	case CRYPTO_ALG_TYPE_SKCIPHER:
128 		return skcipher_request_ctx(skcipher_request_cast(areq));
129 	case CRYPTO_ALG_TYPE_AEAD:
130 		return aead_request_ctx(container_of(areq, struct aead_request,
131 						     base));
132 	case CRYPTO_ALG_TYPE_AHASH:
133 		return ahash_request_ctx(ahash_request_cast(areq));
134 	default:
135 		return ERR_PTR(-EINVAL);
136 	}
137 }
138 
139 static void caam_unmap(struct device *dev, struct scatterlist *src,
140 		       struct scatterlist *dst, int src_nents,
141 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
142 		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
143 {
144 	if (dst != src) {
145 		if (src_nents)
146 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
147 		if (dst_nents)
148 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
149 	} else {
150 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
151 	}
152 
153 	if (iv_dma)
154 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
155 
156 	if (qm_sg_bytes)
157 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
158 }
159 
160 static int aead_set_sh_desc(struct crypto_aead *aead)
161 {
162 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
163 						 typeof(*alg), aead);
164 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
165 	unsigned int ivsize = crypto_aead_ivsize(aead);
166 	struct device *dev = ctx->dev;
167 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
168 	struct caam_flc *flc;
169 	u32 *desc;
170 	u32 ctx1_iv_off = 0;
171 	u32 *nonce = NULL;
172 	unsigned int data_len[2];
173 	u32 inl_mask;
174 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
175 			       OP_ALG_AAI_CTR_MOD128);
176 	const bool is_rfc3686 = alg->caam.rfc3686;
177 
178 	if (!ctx->cdata.keylen || !ctx->authsize)
179 		return 0;
180 
181 	/*
182 	 * AES-CTR needs to load IV in CONTEXT1 reg
183 	 * at an offset of 128bits (16bytes)
184 	 * CONTEXT1[255:128] = IV
185 	 */
186 	if (ctr_mode)
187 		ctx1_iv_off = 16;
188 
189 	/*
190 	 * RFC3686 specific:
191 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
192 	 */
193 	if (is_rfc3686) {
194 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
195 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
196 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
197 	}
198 
199 	data_len[0] = ctx->adata.keylen_pad;
200 	data_len[1] = ctx->cdata.keylen;
201 
202 	/* aead_encrypt shared descriptor */
203 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
204 						 DESC_QI_AEAD_ENC_LEN) +
205 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
206 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
207 			      ARRAY_SIZE(data_len)) < 0)
208 		return -EINVAL;
209 
210 	if (inl_mask & 1)
211 		ctx->adata.key_virt = ctx->key;
212 	else
213 		ctx->adata.key_dma = ctx->key_dma;
214 
215 	if (inl_mask & 2)
216 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
217 	else
218 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
219 
220 	ctx->adata.key_inline = !!(inl_mask & 1);
221 	ctx->cdata.key_inline = !!(inl_mask & 2);
222 
223 	flc = &ctx->flc[ENCRYPT];
224 	desc = flc->sh_desc;
225 
226 	if (alg->caam.geniv)
227 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
228 					  ivsize, ctx->authsize, is_rfc3686,
229 					  nonce, ctx1_iv_off, true,
230 					  priv->sec_attr.era);
231 	else
232 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
233 				       ivsize, ctx->authsize, is_rfc3686, nonce,
234 				       ctx1_iv_off, true, priv->sec_attr.era);
235 
236 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
237 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
238 				   sizeof(flc->flc) + desc_bytes(desc),
239 				   ctx->dir);
240 
241 	/* aead_decrypt shared descriptor */
242 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
243 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
244 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
245 			      ARRAY_SIZE(data_len)) < 0)
246 		return -EINVAL;
247 
248 	if (inl_mask & 1)
249 		ctx->adata.key_virt = ctx->key;
250 	else
251 		ctx->adata.key_dma = ctx->key_dma;
252 
253 	if (inl_mask & 2)
254 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
255 	else
256 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
257 
258 	ctx->adata.key_inline = !!(inl_mask & 1);
259 	ctx->cdata.key_inline = !!(inl_mask & 2);
260 
261 	flc = &ctx->flc[DECRYPT];
262 	desc = flc->sh_desc;
263 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
264 			       ivsize, ctx->authsize, alg->caam.geniv,
265 			       is_rfc3686, nonce, ctx1_iv_off, true,
266 			       priv->sec_attr.era);
267 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
268 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
269 				   sizeof(flc->flc) + desc_bytes(desc),
270 				   ctx->dir);
271 
272 	return 0;
273 }
274 
275 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
276 {
277 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
278 
279 	ctx->authsize = authsize;
280 	aead_set_sh_desc(authenc);
281 
282 	return 0;
283 }
284 
285 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
286 		       unsigned int keylen)
287 {
288 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
289 	struct device *dev = ctx->dev;
290 	struct crypto_authenc_keys keys;
291 
292 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
293 		goto badkey;
294 
295 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
296 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
297 		keys.authkeylen);
298 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
299 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
300 
301 	ctx->adata.keylen = keys.authkeylen;
302 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
303 					      OP_ALG_ALGSEL_MASK);
304 
305 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
306 		goto badkey;
307 
308 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
309 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
310 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
311 				   keys.enckeylen, ctx->dir);
312 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
313 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
314 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
315 
316 	ctx->cdata.keylen = keys.enckeylen;
317 
318 	memzero_explicit(&keys, sizeof(keys));
319 	return aead_set_sh_desc(aead);
320 badkey:
321 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
322 	memzero_explicit(&keys, sizeof(keys));
323 	return -EINVAL;
324 }
325 
326 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
327 			    unsigned int keylen)
328 {
329 	struct crypto_authenc_keys keys;
330 	u32 flags;
331 	int err;
332 
333 	err = crypto_authenc_extractkeys(&keys, key, keylen);
334 	if (unlikely(err))
335 		goto badkey;
336 
337 	err = -EINVAL;
338 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
339 		goto badkey;
340 
341 	flags = crypto_aead_get_flags(aead);
342 	err = __des3_verify_key(&flags, keys.enckey);
343 	if (unlikely(err)) {
344 		crypto_aead_set_flags(aead, flags);
345 		goto out;
346 	}
347 
348 	err = aead_setkey(aead, key, keylen);
349 
350 out:
351 	memzero_explicit(&keys, sizeof(keys));
352 	return err;
353 
354 badkey:
355 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
356 	goto out;
357 }
358 
359 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
360 					   bool encrypt)
361 {
362 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
363 	struct caam_request *req_ctx = aead_request_ctx(req);
364 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
365 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
366 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
367 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
368 						 typeof(*alg), aead);
369 	struct device *dev = ctx->dev;
370 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
371 		      GFP_KERNEL : GFP_ATOMIC;
372 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
373 	struct aead_edesc *edesc;
374 	dma_addr_t qm_sg_dma, iv_dma = 0;
375 	int ivsize = 0;
376 	unsigned int authsize = ctx->authsize;
377 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
378 	int in_len, out_len;
379 	struct dpaa2_sg_entry *sg_table;
380 
381 	/* allocate space for base edesc, link tables and IV */
382 	edesc = qi_cache_zalloc(GFP_DMA | flags);
383 	if (unlikely(!edesc)) {
384 		dev_err(dev, "could not allocate extended descriptor\n");
385 		return ERR_PTR(-ENOMEM);
386 	}
387 
388 	if (unlikely(req->dst != req->src)) {
389 		src_nents = sg_nents_for_len(req->src, req->assoclen +
390 					     req->cryptlen);
391 		if (unlikely(src_nents < 0)) {
392 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
393 				req->assoclen + req->cryptlen);
394 			qi_cache_free(edesc);
395 			return ERR_PTR(src_nents);
396 		}
397 
398 		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
399 					     req->cryptlen +
400 					     (encrypt ? authsize :
401 							(-authsize)));
402 		if (unlikely(dst_nents < 0)) {
403 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
404 				req->assoclen + req->cryptlen +
405 				(encrypt ? authsize : (-authsize)));
406 			qi_cache_free(edesc);
407 			return ERR_PTR(dst_nents);
408 		}
409 
410 		if (src_nents) {
411 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
412 						      DMA_TO_DEVICE);
413 			if (unlikely(!mapped_src_nents)) {
414 				dev_err(dev, "unable to map source\n");
415 				qi_cache_free(edesc);
416 				return ERR_PTR(-ENOMEM);
417 			}
418 		} else {
419 			mapped_src_nents = 0;
420 		}
421 
422 		if (dst_nents) {
423 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
424 						      DMA_FROM_DEVICE);
425 			if (unlikely(!mapped_dst_nents)) {
426 				dev_err(dev, "unable to map destination\n");
427 				dma_unmap_sg(dev, req->src, src_nents,
428 					     DMA_TO_DEVICE);
429 				qi_cache_free(edesc);
430 				return ERR_PTR(-ENOMEM);
431 			}
432 		} else {
433 			mapped_dst_nents = 0;
434 		}
435 	} else {
436 		src_nents = sg_nents_for_len(req->src, req->assoclen +
437 					     req->cryptlen +
438 						(encrypt ? authsize : 0));
439 		if (unlikely(src_nents < 0)) {
440 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
441 				req->assoclen + req->cryptlen +
442 				(encrypt ? authsize : 0));
443 			qi_cache_free(edesc);
444 			return ERR_PTR(src_nents);
445 		}
446 
447 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
448 					      DMA_BIDIRECTIONAL);
449 		if (unlikely(!mapped_src_nents)) {
450 			dev_err(dev, "unable to map source\n");
451 			qi_cache_free(edesc);
452 			return ERR_PTR(-ENOMEM);
453 		}
454 	}
455 
456 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
457 		ivsize = crypto_aead_ivsize(aead);
458 
459 	/*
460 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
461 	 * Input is not contiguous.
462 	 */
463 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
464 		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
465 	sg_table = &edesc->sgt[0];
466 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
467 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
468 		     CAAM_QI_MEMCACHE_SIZE)) {
469 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
470 			qm_sg_nents, ivsize);
471 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
472 			   0, 0, 0);
473 		qi_cache_free(edesc);
474 		return ERR_PTR(-ENOMEM);
475 	}
476 
477 	if (ivsize) {
478 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
479 
480 		/* Make sure IV is located in a DMAable area */
481 		memcpy(iv, req->iv, ivsize);
482 
483 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
484 		if (dma_mapping_error(dev, iv_dma)) {
485 			dev_err(dev, "unable to map IV\n");
486 			caam_unmap(dev, req->src, req->dst, src_nents,
487 				   dst_nents, 0, 0, 0, 0);
488 			qi_cache_free(edesc);
489 			return ERR_PTR(-ENOMEM);
490 		}
491 	}
492 
493 	edesc->src_nents = src_nents;
494 	edesc->dst_nents = dst_nents;
495 	edesc->iv_dma = iv_dma;
496 
497 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
498 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
499 		/*
500 		 * The associated data comes already with the IV but we need
501 		 * to skip it when we authenticate or encrypt...
502 		 */
503 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
504 	else
505 		edesc->assoclen = cpu_to_caam32(req->assoclen);
506 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
507 					     DMA_TO_DEVICE);
508 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
509 		dev_err(dev, "unable to map assoclen\n");
510 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
511 			   iv_dma, ivsize, 0, 0);
512 		qi_cache_free(edesc);
513 		return ERR_PTR(-ENOMEM);
514 	}
515 
516 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
517 	qm_sg_index++;
518 	if (ivsize) {
519 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
520 		qm_sg_index++;
521 	}
522 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
523 	qm_sg_index += mapped_src_nents;
524 
525 	if (mapped_dst_nents > 1)
526 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
527 				 qm_sg_index, 0);
528 
529 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
530 	if (dma_mapping_error(dev, qm_sg_dma)) {
531 		dev_err(dev, "unable to map S/G table\n");
532 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
533 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
534 			   iv_dma, ivsize, 0, 0);
535 		qi_cache_free(edesc);
536 		return ERR_PTR(-ENOMEM);
537 	}
538 
539 	edesc->qm_sg_dma = qm_sg_dma;
540 	edesc->qm_sg_bytes = qm_sg_bytes;
541 
542 	out_len = req->assoclen + req->cryptlen +
543 		  (encrypt ? ctx->authsize : (-ctx->authsize));
544 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
545 
546 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
547 	dpaa2_fl_set_final(in_fle, true);
548 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
549 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
550 	dpaa2_fl_set_len(in_fle, in_len);
551 
552 	if (req->dst == req->src) {
553 		if (mapped_src_nents == 1) {
554 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
555 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
556 		} else {
557 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
558 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
559 					  (1 + !!ivsize) * sizeof(*sg_table));
560 		}
561 	} else if (mapped_dst_nents == 1) {
562 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
563 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
564 	} else {
565 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
566 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
567 				  sizeof(*sg_table));
568 	}
569 
570 	dpaa2_fl_set_len(out_fle, out_len);
571 
572 	return edesc;
573 }
574 
575 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
576 {
577 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
578 	unsigned int ivsize = crypto_aead_ivsize(aead);
579 	struct device *dev = ctx->dev;
580 	struct caam_flc *flc;
581 	u32 *desc;
582 
583 	if (!ctx->cdata.keylen || !ctx->authsize)
584 		return 0;
585 
586 	flc = &ctx->flc[ENCRYPT];
587 	desc = flc->sh_desc;
588 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
589 			       ctx->authsize, true, true);
590 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
591 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
592 				   sizeof(flc->flc) + desc_bytes(desc),
593 				   ctx->dir);
594 
595 	flc = &ctx->flc[DECRYPT];
596 	desc = flc->sh_desc;
597 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
598 			       ctx->authsize, false, true);
599 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
600 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
601 				   sizeof(flc->flc) + desc_bytes(desc),
602 				   ctx->dir);
603 
604 	return 0;
605 }
606 
607 static int chachapoly_setauthsize(struct crypto_aead *aead,
608 				  unsigned int authsize)
609 {
610 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
611 
612 	if (authsize != POLY1305_DIGEST_SIZE)
613 		return -EINVAL;
614 
615 	ctx->authsize = authsize;
616 	return chachapoly_set_sh_desc(aead);
617 }
618 
619 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
620 			     unsigned int keylen)
621 {
622 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
623 	unsigned int ivsize = crypto_aead_ivsize(aead);
624 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
625 
626 	if (keylen != CHACHA_KEY_SIZE + saltlen) {
627 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
628 		return -EINVAL;
629 	}
630 
631 	ctx->cdata.key_virt = key;
632 	ctx->cdata.keylen = keylen - saltlen;
633 
634 	return chachapoly_set_sh_desc(aead);
635 }
636 
637 static int gcm_set_sh_desc(struct crypto_aead *aead)
638 {
639 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
640 	struct device *dev = ctx->dev;
641 	unsigned int ivsize = crypto_aead_ivsize(aead);
642 	struct caam_flc *flc;
643 	u32 *desc;
644 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
645 			ctx->cdata.keylen;
646 
647 	if (!ctx->cdata.keylen || !ctx->authsize)
648 		return 0;
649 
650 	/*
651 	 * AES GCM encrypt shared descriptor
652 	 * Job Descriptor and Shared Descriptor
653 	 * must fit into the 64-word Descriptor h/w Buffer
654 	 */
655 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
656 		ctx->cdata.key_inline = true;
657 		ctx->cdata.key_virt = ctx->key;
658 	} else {
659 		ctx->cdata.key_inline = false;
660 		ctx->cdata.key_dma = ctx->key_dma;
661 	}
662 
663 	flc = &ctx->flc[ENCRYPT];
664 	desc = flc->sh_desc;
665 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
666 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
667 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
668 				   sizeof(flc->flc) + desc_bytes(desc),
669 				   ctx->dir);
670 
671 	/*
672 	 * Job Descriptor and Shared Descriptors
673 	 * must all fit into the 64-word Descriptor h/w Buffer
674 	 */
675 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
676 		ctx->cdata.key_inline = true;
677 		ctx->cdata.key_virt = ctx->key;
678 	} else {
679 		ctx->cdata.key_inline = false;
680 		ctx->cdata.key_dma = ctx->key_dma;
681 	}
682 
683 	flc = &ctx->flc[DECRYPT];
684 	desc = flc->sh_desc;
685 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
686 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
687 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
688 				   sizeof(flc->flc) + desc_bytes(desc),
689 				   ctx->dir);
690 
691 	return 0;
692 }
693 
694 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
695 {
696 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
697 
698 	ctx->authsize = authsize;
699 	gcm_set_sh_desc(authenc);
700 
701 	return 0;
702 }
703 
704 static int gcm_setkey(struct crypto_aead *aead,
705 		      const u8 *key, unsigned int keylen)
706 {
707 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
708 	struct device *dev = ctx->dev;
709 
710 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
711 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
712 
713 	memcpy(ctx->key, key, keylen);
714 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
715 	ctx->cdata.keylen = keylen;
716 
717 	return gcm_set_sh_desc(aead);
718 }
719 
720 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
721 {
722 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
723 	struct device *dev = ctx->dev;
724 	unsigned int ivsize = crypto_aead_ivsize(aead);
725 	struct caam_flc *flc;
726 	u32 *desc;
727 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
728 			ctx->cdata.keylen;
729 
730 	if (!ctx->cdata.keylen || !ctx->authsize)
731 		return 0;
732 
733 	ctx->cdata.key_virt = ctx->key;
734 
735 	/*
736 	 * RFC4106 encrypt shared descriptor
737 	 * Job Descriptor and Shared Descriptor
738 	 * must fit into the 64-word Descriptor h/w Buffer
739 	 */
740 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
741 		ctx->cdata.key_inline = true;
742 	} else {
743 		ctx->cdata.key_inline = false;
744 		ctx->cdata.key_dma = ctx->key_dma;
745 	}
746 
747 	flc = &ctx->flc[ENCRYPT];
748 	desc = flc->sh_desc;
749 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
750 				  true);
751 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
752 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
753 				   sizeof(flc->flc) + desc_bytes(desc),
754 				   ctx->dir);
755 
756 	/*
757 	 * Job Descriptor and Shared Descriptors
758 	 * must all fit into the 64-word Descriptor h/w Buffer
759 	 */
760 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
761 		ctx->cdata.key_inline = true;
762 	} else {
763 		ctx->cdata.key_inline = false;
764 		ctx->cdata.key_dma = ctx->key_dma;
765 	}
766 
767 	flc = &ctx->flc[DECRYPT];
768 	desc = flc->sh_desc;
769 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
770 				  true);
771 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
772 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
773 				   sizeof(flc->flc) + desc_bytes(desc),
774 				   ctx->dir);
775 
776 	return 0;
777 }
778 
779 static int rfc4106_setauthsize(struct crypto_aead *authenc,
780 			       unsigned int authsize)
781 {
782 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
783 
784 	ctx->authsize = authsize;
785 	rfc4106_set_sh_desc(authenc);
786 
787 	return 0;
788 }
789 
790 static int rfc4106_setkey(struct crypto_aead *aead,
791 			  const u8 *key, unsigned int keylen)
792 {
793 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
794 	struct device *dev = ctx->dev;
795 
796 	if (keylen < 4)
797 		return -EINVAL;
798 
799 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
800 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
801 
802 	memcpy(ctx->key, key, keylen);
803 	/*
804 	 * The last four bytes of the key material are used as the salt value
805 	 * in the nonce. Update the AES key length.
806 	 */
807 	ctx->cdata.keylen = keylen - 4;
808 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
809 				   ctx->dir);
810 
811 	return rfc4106_set_sh_desc(aead);
812 }
813 
814 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
815 {
816 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
817 	struct device *dev = ctx->dev;
818 	unsigned int ivsize = crypto_aead_ivsize(aead);
819 	struct caam_flc *flc;
820 	u32 *desc;
821 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
822 			ctx->cdata.keylen;
823 
824 	if (!ctx->cdata.keylen || !ctx->authsize)
825 		return 0;
826 
827 	ctx->cdata.key_virt = ctx->key;
828 
829 	/*
830 	 * RFC4543 encrypt shared descriptor
831 	 * Job Descriptor and Shared Descriptor
832 	 * must fit into the 64-word Descriptor h/w Buffer
833 	 */
834 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
835 		ctx->cdata.key_inline = true;
836 	} else {
837 		ctx->cdata.key_inline = false;
838 		ctx->cdata.key_dma = ctx->key_dma;
839 	}
840 
841 	flc = &ctx->flc[ENCRYPT];
842 	desc = flc->sh_desc;
843 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
844 				  true);
845 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
846 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
847 				   sizeof(flc->flc) + desc_bytes(desc),
848 				   ctx->dir);
849 
850 	/*
851 	 * Job Descriptor and Shared Descriptors
852 	 * must all fit into the 64-word Descriptor h/w Buffer
853 	 */
854 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
855 		ctx->cdata.key_inline = true;
856 	} else {
857 		ctx->cdata.key_inline = false;
858 		ctx->cdata.key_dma = ctx->key_dma;
859 	}
860 
861 	flc = &ctx->flc[DECRYPT];
862 	desc = flc->sh_desc;
863 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
864 				  true);
865 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
866 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
867 				   sizeof(flc->flc) + desc_bytes(desc),
868 				   ctx->dir);
869 
870 	return 0;
871 }
872 
873 static int rfc4543_setauthsize(struct crypto_aead *authenc,
874 			       unsigned int authsize)
875 {
876 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
877 
878 	ctx->authsize = authsize;
879 	rfc4543_set_sh_desc(authenc);
880 
881 	return 0;
882 }
883 
884 static int rfc4543_setkey(struct crypto_aead *aead,
885 			  const u8 *key, unsigned int keylen)
886 {
887 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
888 	struct device *dev = ctx->dev;
889 
890 	if (keylen < 4)
891 		return -EINVAL;
892 
893 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
894 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
895 
896 	memcpy(ctx->key, key, keylen);
897 	/*
898 	 * The last four bytes of the key material are used as the salt value
899 	 * in the nonce. Update the AES key length.
900 	 */
901 	ctx->cdata.keylen = keylen - 4;
902 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
903 				   ctx->dir);
904 
905 	return rfc4543_set_sh_desc(aead);
906 }
907 
908 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
909 			   unsigned int keylen)
910 {
911 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
912 	struct caam_skcipher_alg *alg =
913 		container_of(crypto_skcipher_alg(skcipher),
914 			     struct caam_skcipher_alg, skcipher);
915 	struct device *dev = ctx->dev;
916 	struct caam_flc *flc;
917 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
918 	u32 *desc;
919 	u32 ctx1_iv_off = 0;
920 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
921 			       OP_ALG_AAI_CTR_MOD128) &&
922 			       ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
923 			       OP_ALG_ALGSEL_CHACHA20);
924 	const bool is_rfc3686 = alg->caam.rfc3686;
925 
926 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
927 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
928 
929 	/*
930 	 * AES-CTR needs to load IV in CONTEXT1 reg
931 	 * at an offset of 128bits (16bytes)
932 	 * CONTEXT1[255:128] = IV
933 	 */
934 	if (ctr_mode)
935 		ctx1_iv_off = 16;
936 
937 	/*
938 	 * RFC3686 specific:
939 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
940 	 *	| *key = {KEY, NONCE}
941 	 */
942 	if (is_rfc3686) {
943 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
944 		keylen -= CTR_RFC3686_NONCE_SIZE;
945 	}
946 
947 	ctx->cdata.keylen = keylen;
948 	ctx->cdata.key_virt = key;
949 	ctx->cdata.key_inline = true;
950 
951 	/* skcipher_encrypt shared descriptor */
952 	flc = &ctx->flc[ENCRYPT];
953 	desc = flc->sh_desc;
954 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
955 				   ctx1_iv_off);
956 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
957 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
958 				   sizeof(flc->flc) + desc_bytes(desc),
959 				   ctx->dir);
960 
961 	/* skcipher_decrypt shared descriptor */
962 	flc = &ctx->flc[DECRYPT];
963 	desc = flc->sh_desc;
964 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
965 				   ctx1_iv_off);
966 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
967 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
968 				   sizeof(flc->flc) + desc_bytes(desc),
969 				   ctx->dir);
970 
971 	return 0;
972 }
973 
974 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
975 				const u8 *key, unsigned int keylen)
976 {
977 	return unlikely(des3_verify_key(skcipher, key)) ?:
978 	       skcipher_setkey(skcipher, key, keylen);
979 }
980 
981 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
982 			       unsigned int keylen)
983 {
984 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
985 	struct device *dev = ctx->dev;
986 	struct caam_flc *flc;
987 	u32 *desc;
988 
989 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
990 		dev_err(dev, "key size mismatch\n");
991 		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
992 		return -EINVAL;
993 	}
994 
995 	ctx->cdata.keylen = keylen;
996 	ctx->cdata.key_virt = key;
997 	ctx->cdata.key_inline = true;
998 
999 	/* xts_skcipher_encrypt shared descriptor */
1000 	flc = &ctx->flc[ENCRYPT];
1001 	desc = flc->sh_desc;
1002 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1003 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1004 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1005 				   sizeof(flc->flc) + desc_bytes(desc),
1006 				   ctx->dir);
1007 
1008 	/* xts_skcipher_decrypt shared descriptor */
1009 	flc = &ctx->flc[DECRYPT];
1010 	desc = flc->sh_desc;
1011 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1012 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1013 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1014 				   sizeof(flc->flc) + desc_bytes(desc),
1015 				   ctx->dir);
1016 
1017 	return 0;
1018 }
1019 
1020 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1021 {
1022 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1023 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1024 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1025 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1026 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1027 	struct device *dev = ctx->dev;
1028 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1029 		       GFP_KERNEL : GFP_ATOMIC;
1030 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1031 	struct skcipher_edesc *edesc;
1032 	dma_addr_t iv_dma;
1033 	u8 *iv;
1034 	int ivsize = crypto_skcipher_ivsize(skcipher);
1035 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1036 	struct dpaa2_sg_entry *sg_table;
1037 
1038 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1039 	if (unlikely(src_nents < 0)) {
1040 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1041 			req->cryptlen);
1042 		return ERR_PTR(src_nents);
1043 	}
1044 
1045 	if (unlikely(req->dst != req->src)) {
1046 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1047 		if (unlikely(dst_nents < 0)) {
1048 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1049 				req->cryptlen);
1050 			return ERR_PTR(dst_nents);
1051 		}
1052 
1053 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1054 					      DMA_TO_DEVICE);
1055 		if (unlikely(!mapped_src_nents)) {
1056 			dev_err(dev, "unable to map source\n");
1057 			return ERR_PTR(-ENOMEM);
1058 		}
1059 
1060 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1061 					      DMA_FROM_DEVICE);
1062 		if (unlikely(!mapped_dst_nents)) {
1063 			dev_err(dev, "unable to map destination\n");
1064 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1065 			return ERR_PTR(-ENOMEM);
1066 		}
1067 	} else {
1068 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1069 					      DMA_BIDIRECTIONAL);
1070 		if (unlikely(!mapped_src_nents)) {
1071 			dev_err(dev, "unable to map source\n");
1072 			return ERR_PTR(-ENOMEM);
1073 		}
1074 	}
1075 
1076 	qm_sg_ents = 1 + mapped_src_nents;
1077 	dst_sg_idx = qm_sg_ents;
1078 
1079 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1080 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1081 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1082 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1083 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1084 			qm_sg_ents, ivsize);
1085 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1086 			   0, 0, 0);
1087 		return ERR_PTR(-ENOMEM);
1088 	}
1089 
1090 	/* allocate space for base edesc, link tables and IV */
1091 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1092 	if (unlikely(!edesc)) {
1093 		dev_err(dev, "could not allocate extended descriptor\n");
1094 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1095 			   0, 0, 0);
1096 		return ERR_PTR(-ENOMEM);
1097 	}
1098 
1099 	/* Make sure IV is located in a DMAable area */
1100 	sg_table = &edesc->sgt[0];
1101 	iv = (u8 *)(sg_table + qm_sg_ents);
1102 	memcpy(iv, req->iv, ivsize);
1103 
1104 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1105 	if (dma_mapping_error(dev, iv_dma)) {
1106 		dev_err(dev, "unable to map IV\n");
1107 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1108 			   0, 0, 0);
1109 		qi_cache_free(edesc);
1110 		return ERR_PTR(-ENOMEM);
1111 	}
1112 
1113 	edesc->src_nents = src_nents;
1114 	edesc->dst_nents = dst_nents;
1115 	edesc->iv_dma = iv_dma;
1116 	edesc->qm_sg_bytes = qm_sg_bytes;
1117 
1118 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1119 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1120 
1121 	if (mapped_dst_nents > 1)
1122 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1123 				 dst_sg_idx, 0);
1124 
1125 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1126 					  DMA_TO_DEVICE);
1127 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1128 		dev_err(dev, "unable to map S/G table\n");
1129 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1130 			   iv_dma, ivsize, 0, 0);
1131 		qi_cache_free(edesc);
1132 		return ERR_PTR(-ENOMEM);
1133 	}
1134 
1135 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1136 	dpaa2_fl_set_final(in_fle, true);
1137 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1138 	dpaa2_fl_set_len(out_fle, req->cryptlen);
1139 
1140 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1141 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1142 
1143 	if (req->src == req->dst) {
1144 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1145 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1146 				  sizeof(*sg_table));
1147 	} else if (mapped_dst_nents > 1) {
1148 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1149 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1150 				  sizeof(*sg_table));
1151 	} else {
1152 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1153 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1154 	}
1155 
1156 	return edesc;
1157 }
1158 
1159 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1160 		       struct aead_request *req)
1161 {
1162 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1163 	int ivsize = crypto_aead_ivsize(aead);
1164 
1165 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1166 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1167 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1168 }
1169 
1170 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1171 			   struct skcipher_request *req)
1172 {
1173 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1174 	int ivsize = crypto_skcipher_ivsize(skcipher);
1175 
1176 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1177 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1178 }
1179 
1180 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1181 {
1182 	struct crypto_async_request *areq = cbk_ctx;
1183 	struct aead_request *req = container_of(areq, struct aead_request,
1184 						base);
1185 	struct caam_request *req_ctx = to_caam_req(areq);
1186 	struct aead_edesc *edesc = req_ctx->edesc;
1187 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1188 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1189 	int ecode = 0;
1190 
1191 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1192 
1193 	if (unlikely(status)) {
1194 		caam_qi2_strstatus(ctx->dev, status);
1195 		ecode = -EIO;
1196 	}
1197 
1198 	aead_unmap(ctx->dev, edesc, req);
1199 	qi_cache_free(edesc);
1200 	aead_request_complete(req, ecode);
1201 }
1202 
1203 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1204 {
1205 	struct crypto_async_request *areq = cbk_ctx;
1206 	struct aead_request *req = container_of(areq, struct aead_request,
1207 						base);
1208 	struct caam_request *req_ctx = to_caam_req(areq);
1209 	struct aead_edesc *edesc = req_ctx->edesc;
1210 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1211 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1212 	int ecode = 0;
1213 
1214 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1215 
1216 	if (unlikely(status)) {
1217 		caam_qi2_strstatus(ctx->dev, status);
1218 		/*
1219 		 * verify hw auth check passed else return -EBADMSG
1220 		 */
1221 		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1222 		     JRSTA_CCBERR_ERRID_ICVCHK)
1223 			ecode = -EBADMSG;
1224 		else
1225 			ecode = -EIO;
1226 	}
1227 
1228 	aead_unmap(ctx->dev, edesc, req);
1229 	qi_cache_free(edesc);
1230 	aead_request_complete(req, ecode);
1231 }
1232 
1233 static int aead_encrypt(struct aead_request *req)
1234 {
1235 	struct aead_edesc *edesc;
1236 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1237 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1238 	struct caam_request *caam_req = aead_request_ctx(req);
1239 	int ret;
1240 
1241 	/* allocate extended descriptor */
1242 	edesc = aead_edesc_alloc(req, true);
1243 	if (IS_ERR(edesc))
1244 		return PTR_ERR(edesc);
1245 
1246 	caam_req->flc = &ctx->flc[ENCRYPT];
1247 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1248 	caam_req->cbk = aead_encrypt_done;
1249 	caam_req->ctx = &req->base;
1250 	caam_req->edesc = edesc;
1251 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1252 	if (ret != -EINPROGRESS &&
1253 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1254 		aead_unmap(ctx->dev, edesc, req);
1255 		qi_cache_free(edesc);
1256 	}
1257 
1258 	return ret;
1259 }
1260 
1261 static int aead_decrypt(struct aead_request *req)
1262 {
1263 	struct aead_edesc *edesc;
1264 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1265 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1266 	struct caam_request *caam_req = aead_request_ctx(req);
1267 	int ret;
1268 
1269 	/* allocate extended descriptor */
1270 	edesc = aead_edesc_alloc(req, false);
1271 	if (IS_ERR(edesc))
1272 		return PTR_ERR(edesc);
1273 
1274 	caam_req->flc = &ctx->flc[DECRYPT];
1275 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1276 	caam_req->cbk = aead_decrypt_done;
1277 	caam_req->ctx = &req->base;
1278 	caam_req->edesc = edesc;
1279 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1280 	if (ret != -EINPROGRESS &&
1281 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1282 		aead_unmap(ctx->dev, edesc, req);
1283 		qi_cache_free(edesc);
1284 	}
1285 
1286 	return ret;
1287 }
1288 
1289 static int ipsec_gcm_encrypt(struct aead_request *req)
1290 {
1291 	if (req->assoclen < 8)
1292 		return -EINVAL;
1293 
1294 	return aead_encrypt(req);
1295 }
1296 
1297 static int ipsec_gcm_decrypt(struct aead_request *req)
1298 {
1299 	if (req->assoclen < 8)
1300 		return -EINVAL;
1301 
1302 	return aead_decrypt(req);
1303 }
1304 
1305 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1306 {
1307 	struct crypto_async_request *areq = cbk_ctx;
1308 	struct skcipher_request *req = skcipher_request_cast(areq);
1309 	struct caam_request *req_ctx = to_caam_req(areq);
1310 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1311 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1312 	struct skcipher_edesc *edesc = req_ctx->edesc;
1313 	int ecode = 0;
1314 	int ivsize = crypto_skcipher_ivsize(skcipher);
1315 
1316 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1317 
1318 	if (unlikely(status)) {
1319 		caam_qi2_strstatus(ctx->dev, status);
1320 		ecode = -EIO;
1321 	}
1322 
1323 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1324 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1325 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1326 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1327 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1328 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1329 
1330 	skcipher_unmap(ctx->dev, edesc, req);
1331 
1332 	/*
1333 	 * The crypto API expects us to set the IV (req->iv) to the last
1334 	 * ciphertext block. This is used e.g. by the CTS mode.
1335 	 */
1336 	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
1337 				 ivsize, 0);
1338 
1339 	qi_cache_free(edesc);
1340 	skcipher_request_complete(req, ecode);
1341 }
1342 
1343 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1344 {
1345 	struct crypto_async_request *areq = cbk_ctx;
1346 	struct skcipher_request *req = skcipher_request_cast(areq);
1347 	struct caam_request *req_ctx = to_caam_req(areq);
1348 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1349 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1350 	struct skcipher_edesc *edesc = req_ctx->edesc;
1351 	int ecode = 0;
1352 	int ivsize = crypto_skcipher_ivsize(skcipher);
1353 
1354 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1355 
1356 	if (unlikely(status)) {
1357 		caam_qi2_strstatus(ctx->dev, status);
1358 		ecode = -EIO;
1359 	}
1360 
1361 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1362 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1363 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1364 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1365 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1366 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1367 
1368 	skcipher_unmap(ctx->dev, edesc, req);
1369 	qi_cache_free(edesc);
1370 	skcipher_request_complete(req, ecode);
1371 }
1372 
1373 static int skcipher_encrypt(struct skcipher_request *req)
1374 {
1375 	struct skcipher_edesc *edesc;
1376 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1377 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1378 	struct caam_request *caam_req = skcipher_request_ctx(req);
1379 	int ret;
1380 
1381 	/* allocate extended descriptor */
1382 	edesc = skcipher_edesc_alloc(req);
1383 	if (IS_ERR(edesc))
1384 		return PTR_ERR(edesc);
1385 
1386 	caam_req->flc = &ctx->flc[ENCRYPT];
1387 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1388 	caam_req->cbk = skcipher_encrypt_done;
1389 	caam_req->ctx = &req->base;
1390 	caam_req->edesc = edesc;
1391 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1392 	if (ret != -EINPROGRESS &&
1393 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1394 		skcipher_unmap(ctx->dev, edesc, req);
1395 		qi_cache_free(edesc);
1396 	}
1397 
1398 	return ret;
1399 }
1400 
1401 static int skcipher_decrypt(struct skcipher_request *req)
1402 {
1403 	struct skcipher_edesc *edesc;
1404 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1405 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1406 	struct caam_request *caam_req = skcipher_request_ctx(req);
1407 	int ivsize = crypto_skcipher_ivsize(skcipher);
1408 	int ret;
1409 
1410 	/* allocate extended descriptor */
1411 	edesc = skcipher_edesc_alloc(req);
1412 	if (IS_ERR(edesc))
1413 		return PTR_ERR(edesc);
1414 
1415 	/*
1416 	 * The crypto API expects us to set the IV (req->iv) to the last
1417 	 * ciphertext block.
1418 	 */
1419 	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1420 				 ivsize, 0);
1421 
1422 	caam_req->flc = &ctx->flc[DECRYPT];
1423 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1424 	caam_req->cbk = skcipher_decrypt_done;
1425 	caam_req->ctx = &req->base;
1426 	caam_req->edesc = edesc;
1427 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1428 	if (ret != -EINPROGRESS &&
1429 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1430 		skcipher_unmap(ctx->dev, edesc, req);
1431 		qi_cache_free(edesc);
1432 	}
1433 
1434 	return ret;
1435 }
1436 
1437 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1438 			 bool uses_dkp)
1439 {
1440 	dma_addr_t dma_addr;
1441 	int i;
1442 
1443 	/* copy descriptor header template value */
1444 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1445 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1446 
1447 	ctx->dev = caam->dev;
1448 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1449 
1450 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1451 					offsetof(struct caam_ctx, flc_dma),
1452 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1453 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1454 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1455 		return -ENOMEM;
1456 	}
1457 
1458 	for (i = 0; i < NUM_OP; i++)
1459 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1460 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1461 
1462 	return 0;
1463 }
1464 
1465 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1466 {
1467 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1468 	struct caam_skcipher_alg *caam_alg =
1469 		container_of(alg, typeof(*caam_alg), skcipher);
1470 
1471 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1472 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1473 }
1474 
1475 static int caam_cra_init_aead(struct crypto_aead *tfm)
1476 {
1477 	struct aead_alg *alg = crypto_aead_alg(tfm);
1478 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1479 						      aead);
1480 
1481 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1482 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1483 			     alg->setkey == aead_setkey);
1484 }
1485 
1486 static void caam_exit_common(struct caam_ctx *ctx)
1487 {
1488 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1489 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1490 			       DMA_ATTR_SKIP_CPU_SYNC);
1491 }
1492 
1493 static void caam_cra_exit(struct crypto_skcipher *tfm)
1494 {
1495 	caam_exit_common(crypto_skcipher_ctx(tfm));
1496 }
1497 
1498 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1499 {
1500 	caam_exit_common(crypto_aead_ctx(tfm));
1501 }
1502 
1503 static struct caam_skcipher_alg driver_algs[] = {
1504 	{
1505 		.skcipher = {
1506 			.base = {
1507 				.cra_name = "cbc(aes)",
1508 				.cra_driver_name = "cbc-aes-caam-qi2",
1509 				.cra_blocksize = AES_BLOCK_SIZE,
1510 			},
1511 			.setkey = skcipher_setkey,
1512 			.encrypt = skcipher_encrypt,
1513 			.decrypt = skcipher_decrypt,
1514 			.min_keysize = AES_MIN_KEY_SIZE,
1515 			.max_keysize = AES_MAX_KEY_SIZE,
1516 			.ivsize = AES_BLOCK_SIZE,
1517 		},
1518 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1519 	},
1520 	{
1521 		.skcipher = {
1522 			.base = {
1523 				.cra_name = "cbc(des3_ede)",
1524 				.cra_driver_name = "cbc-3des-caam-qi2",
1525 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1526 			},
1527 			.setkey = des3_skcipher_setkey,
1528 			.encrypt = skcipher_encrypt,
1529 			.decrypt = skcipher_decrypt,
1530 			.min_keysize = DES3_EDE_KEY_SIZE,
1531 			.max_keysize = DES3_EDE_KEY_SIZE,
1532 			.ivsize = DES3_EDE_BLOCK_SIZE,
1533 		},
1534 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1535 	},
1536 	{
1537 		.skcipher = {
1538 			.base = {
1539 				.cra_name = "cbc(des)",
1540 				.cra_driver_name = "cbc-des-caam-qi2",
1541 				.cra_blocksize = DES_BLOCK_SIZE,
1542 			},
1543 			.setkey = skcipher_setkey,
1544 			.encrypt = skcipher_encrypt,
1545 			.decrypt = skcipher_decrypt,
1546 			.min_keysize = DES_KEY_SIZE,
1547 			.max_keysize = DES_KEY_SIZE,
1548 			.ivsize = DES_BLOCK_SIZE,
1549 		},
1550 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1551 	},
1552 	{
1553 		.skcipher = {
1554 			.base = {
1555 				.cra_name = "ctr(aes)",
1556 				.cra_driver_name = "ctr-aes-caam-qi2",
1557 				.cra_blocksize = 1,
1558 			},
1559 			.setkey = skcipher_setkey,
1560 			.encrypt = skcipher_encrypt,
1561 			.decrypt = skcipher_decrypt,
1562 			.min_keysize = AES_MIN_KEY_SIZE,
1563 			.max_keysize = AES_MAX_KEY_SIZE,
1564 			.ivsize = AES_BLOCK_SIZE,
1565 			.chunksize = AES_BLOCK_SIZE,
1566 		},
1567 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1568 					OP_ALG_AAI_CTR_MOD128,
1569 	},
1570 	{
1571 		.skcipher = {
1572 			.base = {
1573 				.cra_name = "rfc3686(ctr(aes))",
1574 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1575 				.cra_blocksize = 1,
1576 			},
1577 			.setkey = skcipher_setkey,
1578 			.encrypt = skcipher_encrypt,
1579 			.decrypt = skcipher_decrypt,
1580 			.min_keysize = AES_MIN_KEY_SIZE +
1581 				       CTR_RFC3686_NONCE_SIZE,
1582 			.max_keysize = AES_MAX_KEY_SIZE +
1583 				       CTR_RFC3686_NONCE_SIZE,
1584 			.ivsize = CTR_RFC3686_IV_SIZE,
1585 			.chunksize = AES_BLOCK_SIZE,
1586 		},
1587 		.caam = {
1588 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1589 					   OP_ALG_AAI_CTR_MOD128,
1590 			.rfc3686 = true,
1591 		},
1592 	},
1593 	{
1594 		.skcipher = {
1595 			.base = {
1596 				.cra_name = "xts(aes)",
1597 				.cra_driver_name = "xts-aes-caam-qi2",
1598 				.cra_blocksize = AES_BLOCK_SIZE,
1599 			},
1600 			.setkey = xts_skcipher_setkey,
1601 			.encrypt = skcipher_encrypt,
1602 			.decrypt = skcipher_decrypt,
1603 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1604 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1605 			.ivsize = AES_BLOCK_SIZE,
1606 		},
1607 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1608 	},
1609 	{
1610 		.skcipher = {
1611 			.base = {
1612 				.cra_name = "chacha20",
1613 				.cra_driver_name = "chacha20-caam-qi2",
1614 				.cra_blocksize = 1,
1615 			},
1616 			.setkey = skcipher_setkey,
1617 			.encrypt = skcipher_encrypt,
1618 			.decrypt = skcipher_decrypt,
1619 			.min_keysize = CHACHA_KEY_SIZE,
1620 			.max_keysize = CHACHA_KEY_SIZE,
1621 			.ivsize = CHACHA_IV_SIZE,
1622 		},
1623 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1624 	},
1625 };
1626 
1627 static struct caam_aead_alg driver_aeads[] = {
1628 	{
1629 		.aead = {
1630 			.base = {
1631 				.cra_name = "rfc4106(gcm(aes))",
1632 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1633 				.cra_blocksize = 1,
1634 			},
1635 			.setkey = rfc4106_setkey,
1636 			.setauthsize = rfc4106_setauthsize,
1637 			.encrypt = ipsec_gcm_encrypt,
1638 			.decrypt = ipsec_gcm_decrypt,
1639 			.ivsize = 8,
1640 			.maxauthsize = AES_BLOCK_SIZE,
1641 		},
1642 		.caam = {
1643 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1644 		},
1645 	},
1646 	{
1647 		.aead = {
1648 			.base = {
1649 				.cra_name = "rfc4543(gcm(aes))",
1650 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1651 				.cra_blocksize = 1,
1652 			},
1653 			.setkey = rfc4543_setkey,
1654 			.setauthsize = rfc4543_setauthsize,
1655 			.encrypt = ipsec_gcm_encrypt,
1656 			.decrypt = ipsec_gcm_decrypt,
1657 			.ivsize = 8,
1658 			.maxauthsize = AES_BLOCK_SIZE,
1659 		},
1660 		.caam = {
1661 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1662 		},
1663 	},
1664 	/* Galois Counter Mode */
1665 	{
1666 		.aead = {
1667 			.base = {
1668 				.cra_name = "gcm(aes)",
1669 				.cra_driver_name = "gcm-aes-caam-qi2",
1670 				.cra_blocksize = 1,
1671 			},
1672 			.setkey = gcm_setkey,
1673 			.setauthsize = gcm_setauthsize,
1674 			.encrypt = aead_encrypt,
1675 			.decrypt = aead_decrypt,
1676 			.ivsize = 12,
1677 			.maxauthsize = AES_BLOCK_SIZE,
1678 		},
1679 		.caam = {
1680 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1681 		}
1682 	},
1683 	/* single-pass ipsec_esp descriptor */
1684 	{
1685 		.aead = {
1686 			.base = {
1687 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1688 				.cra_driver_name = "authenc-hmac-md5-"
1689 						   "cbc-aes-caam-qi2",
1690 				.cra_blocksize = AES_BLOCK_SIZE,
1691 			},
1692 			.setkey = aead_setkey,
1693 			.setauthsize = aead_setauthsize,
1694 			.encrypt = aead_encrypt,
1695 			.decrypt = aead_decrypt,
1696 			.ivsize = AES_BLOCK_SIZE,
1697 			.maxauthsize = MD5_DIGEST_SIZE,
1698 		},
1699 		.caam = {
1700 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1701 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1702 					   OP_ALG_AAI_HMAC_PRECOMP,
1703 		}
1704 	},
1705 	{
1706 		.aead = {
1707 			.base = {
1708 				.cra_name = "echainiv(authenc(hmac(md5),"
1709 					    "cbc(aes)))",
1710 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1711 						   "cbc-aes-caam-qi2",
1712 				.cra_blocksize = AES_BLOCK_SIZE,
1713 			},
1714 			.setkey = aead_setkey,
1715 			.setauthsize = aead_setauthsize,
1716 			.encrypt = aead_encrypt,
1717 			.decrypt = aead_decrypt,
1718 			.ivsize = AES_BLOCK_SIZE,
1719 			.maxauthsize = MD5_DIGEST_SIZE,
1720 		},
1721 		.caam = {
1722 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1723 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1724 					   OP_ALG_AAI_HMAC_PRECOMP,
1725 			.geniv = true,
1726 		}
1727 	},
1728 	{
1729 		.aead = {
1730 			.base = {
1731 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1732 				.cra_driver_name = "authenc-hmac-sha1-"
1733 						   "cbc-aes-caam-qi2",
1734 				.cra_blocksize = AES_BLOCK_SIZE,
1735 			},
1736 			.setkey = aead_setkey,
1737 			.setauthsize = aead_setauthsize,
1738 			.encrypt = aead_encrypt,
1739 			.decrypt = aead_decrypt,
1740 			.ivsize = AES_BLOCK_SIZE,
1741 			.maxauthsize = SHA1_DIGEST_SIZE,
1742 		},
1743 		.caam = {
1744 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1745 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1746 					   OP_ALG_AAI_HMAC_PRECOMP,
1747 		}
1748 	},
1749 	{
1750 		.aead = {
1751 			.base = {
1752 				.cra_name = "echainiv(authenc(hmac(sha1),"
1753 					    "cbc(aes)))",
1754 				.cra_driver_name = "echainiv-authenc-"
1755 						   "hmac-sha1-cbc-aes-caam-qi2",
1756 				.cra_blocksize = AES_BLOCK_SIZE,
1757 			},
1758 			.setkey = aead_setkey,
1759 			.setauthsize = aead_setauthsize,
1760 			.encrypt = aead_encrypt,
1761 			.decrypt = aead_decrypt,
1762 			.ivsize = AES_BLOCK_SIZE,
1763 			.maxauthsize = SHA1_DIGEST_SIZE,
1764 		},
1765 		.caam = {
1766 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1767 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1768 					   OP_ALG_AAI_HMAC_PRECOMP,
1769 			.geniv = true,
1770 		},
1771 	},
1772 	{
1773 		.aead = {
1774 			.base = {
1775 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1776 				.cra_driver_name = "authenc-hmac-sha224-"
1777 						   "cbc-aes-caam-qi2",
1778 				.cra_blocksize = AES_BLOCK_SIZE,
1779 			},
1780 			.setkey = aead_setkey,
1781 			.setauthsize = aead_setauthsize,
1782 			.encrypt = aead_encrypt,
1783 			.decrypt = aead_decrypt,
1784 			.ivsize = AES_BLOCK_SIZE,
1785 			.maxauthsize = SHA224_DIGEST_SIZE,
1786 		},
1787 		.caam = {
1788 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1789 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1790 					   OP_ALG_AAI_HMAC_PRECOMP,
1791 		}
1792 	},
1793 	{
1794 		.aead = {
1795 			.base = {
1796 				.cra_name = "echainiv(authenc(hmac(sha224),"
1797 					    "cbc(aes)))",
1798 				.cra_driver_name = "echainiv-authenc-"
1799 						   "hmac-sha224-cbc-aes-caam-qi2",
1800 				.cra_blocksize = AES_BLOCK_SIZE,
1801 			},
1802 			.setkey = aead_setkey,
1803 			.setauthsize = aead_setauthsize,
1804 			.encrypt = aead_encrypt,
1805 			.decrypt = aead_decrypt,
1806 			.ivsize = AES_BLOCK_SIZE,
1807 			.maxauthsize = SHA224_DIGEST_SIZE,
1808 		},
1809 		.caam = {
1810 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1811 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1812 					   OP_ALG_AAI_HMAC_PRECOMP,
1813 			.geniv = true,
1814 		}
1815 	},
1816 	{
1817 		.aead = {
1818 			.base = {
1819 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1820 				.cra_driver_name = "authenc-hmac-sha256-"
1821 						   "cbc-aes-caam-qi2",
1822 				.cra_blocksize = AES_BLOCK_SIZE,
1823 			},
1824 			.setkey = aead_setkey,
1825 			.setauthsize = aead_setauthsize,
1826 			.encrypt = aead_encrypt,
1827 			.decrypt = aead_decrypt,
1828 			.ivsize = AES_BLOCK_SIZE,
1829 			.maxauthsize = SHA256_DIGEST_SIZE,
1830 		},
1831 		.caam = {
1832 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1833 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1834 					   OP_ALG_AAI_HMAC_PRECOMP,
1835 		}
1836 	},
1837 	{
1838 		.aead = {
1839 			.base = {
1840 				.cra_name = "echainiv(authenc(hmac(sha256),"
1841 					    "cbc(aes)))",
1842 				.cra_driver_name = "echainiv-authenc-"
1843 						   "hmac-sha256-cbc-aes-"
1844 						   "caam-qi2",
1845 				.cra_blocksize = AES_BLOCK_SIZE,
1846 			},
1847 			.setkey = aead_setkey,
1848 			.setauthsize = aead_setauthsize,
1849 			.encrypt = aead_encrypt,
1850 			.decrypt = aead_decrypt,
1851 			.ivsize = AES_BLOCK_SIZE,
1852 			.maxauthsize = SHA256_DIGEST_SIZE,
1853 		},
1854 		.caam = {
1855 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1856 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1857 					   OP_ALG_AAI_HMAC_PRECOMP,
1858 			.geniv = true,
1859 		}
1860 	},
1861 	{
1862 		.aead = {
1863 			.base = {
1864 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1865 				.cra_driver_name = "authenc-hmac-sha384-"
1866 						   "cbc-aes-caam-qi2",
1867 				.cra_blocksize = AES_BLOCK_SIZE,
1868 			},
1869 			.setkey = aead_setkey,
1870 			.setauthsize = aead_setauthsize,
1871 			.encrypt = aead_encrypt,
1872 			.decrypt = aead_decrypt,
1873 			.ivsize = AES_BLOCK_SIZE,
1874 			.maxauthsize = SHA384_DIGEST_SIZE,
1875 		},
1876 		.caam = {
1877 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1878 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1879 					   OP_ALG_AAI_HMAC_PRECOMP,
1880 		}
1881 	},
1882 	{
1883 		.aead = {
1884 			.base = {
1885 				.cra_name = "echainiv(authenc(hmac(sha384),"
1886 					    "cbc(aes)))",
1887 				.cra_driver_name = "echainiv-authenc-"
1888 						   "hmac-sha384-cbc-aes-"
1889 						   "caam-qi2",
1890 				.cra_blocksize = AES_BLOCK_SIZE,
1891 			},
1892 			.setkey = aead_setkey,
1893 			.setauthsize = aead_setauthsize,
1894 			.encrypt = aead_encrypt,
1895 			.decrypt = aead_decrypt,
1896 			.ivsize = AES_BLOCK_SIZE,
1897 			.maxauthsize = SHA384_DIGEST_SIZE,
1898 		},
1899 		.caam = {
1900 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1901 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1902 					   OP_ALG_AAI_HMAC_PRECOMP,
1903 			.geniv = true,
1904 		}
1905 	},
1906 	{
1907 		.aead = {
1908 			.base = {
1909 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1910 				.cra_driver_name = "authenc-hmac-sha512-"
1911 						   "cbc-aes-caam-qi2",
1912 				.cra_blocksize = AES_BLOCK_SIZE,
1913 			},
1914 			.setkey = aead_setkey,
1915 			.setauthsize = aead_setauthsize,
1916 			.encrypt = aead_encrypt,
1917 			.decrypt = aead_decrypt,
1918 			.ivsize = AES_BLOCK_SIZE,
1919 			.maxauthsize = SHA512_DIGEST_SIZE,
1920 		},
1921 		.caam = {
1922 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1923 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1924 					   OP_ALG_AAI_HMAC_PRECOMP,
1925 		}
1926 	},
1927 	{
1928 		.aead = {
1929 			.base = {
1930 				.cra_name = "echainiv(authenc(hmac(sha512),"
1931 					    "cbc(aes)))",
1932 				.cra_driver_name = "echainiv-authenc-"
1933 						   "hmac-sha512-cbc-aes-"
1934 						   "caam-qi2",
1935 				.cra_blocksize = AES_BLOCK_SIZE,
1936 			},
1937 			.setkey = aead_setkey,
1938 			.setauthsize = aead_setauthsize,
1939 			.encrypt = aead_encrypt,
1940 			.decrypt = aead_decrypt,
1941 			.ivsize = AES_BLOCK_SIZE,
1942 			.maxauthsize = SHA512_DIGEST_SIZE,
1943 		},
1944 		.caam = {
1945 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1946 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1947 					   OP_ALG_AAI_HMAC_PRECOMP,
1948 			.geniv = true,
1949 		}
1950 	},
1951 	{
1952 		.aead = {
1953 			.base = {
1954 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1955 				.cra_driver_name = "authenc-hmac-md5-"
1956 						   "cbc-des3_ede-caam-qi2",
1957 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1958 			},
1959 			.setkey = des3_aead_setkey,
1960 			.setauthsize = aead_setauthsize,
1961 			.encrypt = aead_encrypt,
1962 			.decrypt = aead_decrypt,
1963 			.ivsize = DES3_EDE_BLOCK_SIZE,
1964 			.maxauthsize = MD5_DIGEST_SIZE,
1965 		},
1966 		.caam = {
1967 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1968 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1969 					   OP_ALG_AAI_HMAC_PRECOMP,
1970 		}
1971 	},
1972 	{
1973 		.aead = {
1974 			.base = {
1975 				.cra_name = "echainiv(authenc(hmac(md5),"
1976 					    "cbc(des3_ede)))",
1977 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1978 						   "cbc-des3_ede-caam-qi2",
1979 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1980 			},
1981 			.setkey = des3_aead_setkey,
1982 			.setauthsize = aead_setauthsize,
1983 			.encrypt = aead_encrypt,
1984 			.decrypt = aead_decrypt,
1985 			.ivsize = DES3_EDE_BLOCK_SIZE,
1986 			.maxauthsize = MD5_DIGEST_SIZE,
1987 		},
1988 		.caam = {
1989 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1990 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1991 					   OP_ALG_AAI_HMAC_PRECOMP,
1992 			.geniv = true,
1993 		}
1994 	},
1995 	{
1996 		.aead = {
1997 			.base = {
1998 				.cra_name = "authenc(hmac(sha1),"
1999 					    "cbc(des3_ede))",
2000 				.cra_driver_name = "authenc-hmac-sha1-"
2001 						   "cbc-des3_ede-caam-qi2",
2002 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2003 			},
2004 			.setkey = des3_aead_setkey,
2005 			.setauthsize = aead_setauthsize,
2006 			.encrypt = aead_encrypt,
2007 			.decrypt = aead_decrypt,
2008 			.ivsize = DES3_EDE_BLOCK_SIZE,
2009 			.maxauthsize = SHA1_DIGEST_SIZE,
2010 		},
2011 		.caam = {
2012 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2013 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2014 					   OP_ALG_AAI_HMAC_PRECOMP,
2015 		},
2016 	},
2017 	{
2018 		.aead = {
2019 			.base = {
2020 				.cra_name = "echainiv(authenc(hmac(sha1),"
2021 					    "cbc(des3_ede)))",
2022 				.cra_driver_name = "echainiv-authenc-"
2023 						   "hmac-sha1-"
2024 						   "cbc-des3_ede-caam-qi2",
2025 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2026 			},
2027 			.setkey = des3_aead_setkey,
2028 			.setauthsize = aead_setauthsize,
2029 			.encrypt = aead_encrypt,
2030 			.decrypt = aead_decrypt,
2031 			.ivsize = DES3_EDE_BLOCK_SIZE,
2032 			.maxauthsize = SHA1_DIGEST_SIZE,
2033 		},
2034 		.caam = {
2035 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2036 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2037 					   OP_ALG_AAI_HMAC_PRECOMP,
2038 			.geniv = true,
2039 		}
2040 	},
2041 	{
2042 		.aead = {
2043 			.base = {
2044 				.cra_name = "authenc(hmac(sha224),"
2045 					    "cbc(des3_ede))",
2046 				.cra_driver_name = "authenc-hmac-sha224-"
2047 						   "cbc-des3_ede-caam-qi2",
2048 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2049 			},
2050 			.setkey = des3_aead_setkey,
2051 			.setauthsize = aead_setauthsize,
2052 			.encrypt = aead_encrypt,
2053 			.decrypt = aead_decrypt,
2054 			.ivsize = DES3_EDE_BLOCK_SIZE,
2055 			.maxauthsize = SHA224_DIGEST_SIZE,
2056 		},
2057 		.caam = {
2058 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2059 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2060 					   OP_ALG_AAI_HMAC_PRECOMP,
2061 		},
2062 	},
2063 	{
2064 		.aead = {
2065 			.base = {
2066 				.cra_name = "echainiv(authenc(hmac(sha224),"
2067 					    "cbc(des3_ede)))",
2068 				.cra_driver_name = "echainiv-authenc-"
2069 						   "hmac-sha224-"
2070 						   "cbc-des3_ede-caam-qi2",
2071 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2072 			},
2073 			.setkey = des3_aead_setkey,
2074 			.setauthsize = aead_setauthsize,
2075 			.encrypt = aead_encrypt,
2076 			.decrypt = aead_decrypt,
2077 			.ivsize = DES3_EDE_BLOCK_SIZE,
2078 			.maxauthsize = SHA224_DIGEST_SIZE,
2079 		},
2080 		.caam = {
2081 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2082 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2083 					   OP_ALG_AAI_HMAC_PRECOMP,
2084 			.geniv = true,
2085 		}
2086 	},
2087 	{
2088 		.aead = {
2089 			.base = {
2090 				.cra_name = "authenc(hmac(sha256),"
2091 					    "cbc(des3_ede))",
2092 				.cra_driver_name = "authenc-hmac-sha256-"
2093 						   "cbc-des3_ede-caam-qi2",
2094 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2095 			},
2096 			.setkey = des3_aead_setkey,
2097 			.setauthsize = aead_setauthsize,
2098 			.encrypt = aead_encrypt,
2099 			.decrypt = aead_decrypt,
2100 			.ivsize = DES3_EDE_BLOCK_SIZE,
2101 			.maxauthsize = SHA256_DIGEST_SIZE,
2102 		},
2103 		.caam = {
2104 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2105 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2106 					   OP_ALG_AAI_HMAC_PRECOMP,
2107 		},
2108 	},
2109 	{
2110 		.aead = {
2111 			.base = {
2112 				.cra_name = "echainiv(authenc(hmac(sha256),"
2113 					    "cbc(des3_ede)))",
2114 				.cra_driver_name = "echainiv-authenc-"
2115 						   "hmac-sha256-"
2116 						   "cbc-des3_ede-caam-qi2",
2117 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2118 			},
2119 			.setkey = des3_aead_setkey,
2120 			.setauthsize = aead_setauthsize,
2121 			.encrypt = aead_encrypt,
2122 			.decrypt = aead_decrypt,
2123 			.ivsize = DES3_EDE_BLOCK_SIZE,
2124 			.maxauthsize = SHA256_DIGEST_SIZE,
2125 		},
2126 		.caam = {
2127 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2128 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2129 					   OP_ALG_AAI_HMAC_PRECOMP,
2130 			.geniv = true,
2131 		}
2132 	},
2133 	{
2134 		.aead = {
2135 			.base = {
2136 				.cra_name = "authenc(hmac(sha384),"
2137 					    "cbc(des3_ede))",
2138 				.cra_driver_name = "authenc-hmac-sha384-"
2139 						   "cbc-des3_ede-caam-qi2",
2140 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2141 			},
2142 			.setkey = des3_aead_setkey,
2143 			.setauthsize = aead_setauthsize,
2144 			.encrypt = aead_encrypt,
2145 			.decrypt = aead_decrypt,
2146 			.ivsize = DES3_EDE_BLOCK_SIZE,
2147 			.maxauthsize = SHA384_DIGEST_SIZE,
2148 		},
2149 		.caam = {
2150 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2151 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2152 					   OP_ALG_AAI_HMAC_PRECOMP,
2153 		},
2154 	},
2155 	{
2156 		.aead = {
2157 			.base = {
2158 				.cra_name = "echainiv(authenc(hmac(sha384),"
2159 					    "cbc(des3_ede)))",
2160 				.cra_driver_name = "echainiv-authenc-"
2161 						   "hmac-sha384-"
2162 						   "cbc-des3_ede-caam-qi2",
2163 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2164 			},
2165 			.setkey = des3_aead_setkey,
2166 			.setauthsize = aead_setauthsize,
2167 			.encrypt = aead_encrypt,
2168 			.decrypt = aead_decrypt,
2169 			.ivsize = DES3_EDE_BLOCK_SIZE,
2170 			.maxauthsize = SHA384_DIGEST_SIZE,
2171 		},
2172 		.caam = {
2173 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2174 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2175 					   OP_ALG_AAI_HMAC_PRECOMP,
2176 			.geniv = true,
2177 		}
2178 	},
2179 	{
2180 		.aead = {
2181 			.base = {
2182 				.cra_name = "authenc(hmac(sha512),"
2183 					    "cbc(des3_ede))",
2184 				.cra_driver_name = "authenc-hmac-sha512-"
2185 						   "cbc-des3_ede-caam-qi2",
2186 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2187 			},
2188 			.setkey = des3_aead_setkey,
2189 			.setauthsize = aead_setauthsize,
2190 			.encrypt = aead_encrypt,
2191 			.decrypt = aead_decrypt,
2192 			.ivsize = DES3_EDE_BLOCK_SIZE,
2193 			.maxauthsize = SHA512_DIGEST_SIZE,
2194 		},
2195 		.caam = {
2196 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2197 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2198 					   OP_ALG_AAI_HMAC_PRECOMP,
2199 		},
2200 	},
2201 	{
2202 		.aead = {
2203 			.base = {
2204 				.cra_name = "echainiv(authenc(hmac(sha512),"
2205 					    "cbc(des3_ede)))",
2206 				.cra_driver_name = "echainiv-authenc-"
2207 						   "hmac-sha512-"
2208 						   "cbc-des3_ede-caam-qi2",
2209 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2210 			},
2211 			.setkey = des3_aead_setkey,
2212 			.setauthsize = aead_setauthsize,
2213 			.encrypt = aead_encrypt,
2214 			.decrypt = aead_decrypt,
2215 			.ivsize = DES3_EDE_BLOCK_SIZE,
2216 			.maxauthsize = SHA512_DIGEST_SIZE,
2217 		},
2218 		.caam = {
2219 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2220 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2221 					   OP_ALG_AAI_HMAC_PRECOMP,
2222 			.geniv = true,
2223 		}
2224 	},
2225 	{
2226 		.aead = {
2227 			.base = {
2228 				.cra_name = "authenc(hmac(md5),cbc(des))",
2229 				.cra_driver_name = "authenc-hmac-md5-"
2230 						   "cbc-des-caam-qi2",
2231 				.cra_blocksize = DES_BLOCK_SIZE,
2232 			},
2233 			.setkey = aead_setkey,
2234 			.setauthsize = aead_setauthsize,
2235 			.encrypt = aead_encrypt,
2236 			.decrypt = aead_decrypt,
2237 			.ivsize = DES_BLOCK_SIZE,
2238 			.maxauthsize = MD5_DIGEST_SIZE,
2239 		},
2240 		.caam = {
2241 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2242 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2243 					   OP_ALG_AAI_HMAC_PRECOMP,
2244 		},
2245 	},
2246 	{
2247 		.aead = {
2248 			.base = {
2249 				.cra_name = "echainiv(authenc(hmac(md5),"
2250 					    "cbc(des)))",
2251 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2252 						   "cbc-des-caam-qi2",
2253 				.cra_blocksize = DES_BLOCK_SIZE,
2254 			},
2255 			.setkey = aead_setkey,
2256 			.setauthsize = aead_setauthsize,
2257 			.encrypt = aead_encrypt,
2258 			.decrypt = aead_decrypt,
2259 			.ivsize = DES_BLOCK_SIZE,
2260 			.maxauthsize = MD5_DIGEST_SIZE,
2261 		},
2262 		.caam = {
2263 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2264 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2265 					   OP_ALG_AAI_HMAC_PRECOMP,
2266 			.geniv = true,
2267 		}
2268 	},
2269 	{
2270 		.aead = {
2271 			.base = {
2272 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2273 				.cra_driver_name = "authenc-hmac-sha1-"
2274 						   "cbc-des-caam-qi2",
2275 				.cra_blocksize = DES_BLOCK_SIZE,
2276 			},
2277 			.setkey = aead_setkey,
2278 			.setauthsize = aead_setauthsize,
2279 			.encrypt = aead_encrypt,
2280 			.decrypt = aead_decrypt,
2281 			.ivsize = DES_BLOCK_SIZE,
2282 			.maxauthsize = SHA1_DIGEST_SIZE,
2283 		},
2284 		.caam = {
2285 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2286 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2287 					   OP_ALG_AAI_HMAC_PRECOMP,
2288 		},
2289 	},
2290 	{
2291 		.aead = {
2292 			.base = {
2293 				.cra_name = "echainiv(authenc(hmac(sha1),"
2294 					    "cbc(des)))",
2295 				.cra_driver_name = "echainiv-authenc-"
2296 						   "hmac-sha1-cbc-des-caam-qi2",
2297 				.cra_blocksize = DES_BLOCK_SIZE,
2298 			},
2299 			.setkey = aead_setkey,
2300 			.setauthsize = aead_setauthsize,
2301 			.encrypt = aead_encrypt,
2302 			.decrypt = aead_decrypt,
2303 			.ivsize = DES_BLOCK_SIZE,
2304 			.maxauthsize = SHA1_DIGEST_SIZE,
2305 		},
2306 		.caam = {
2307 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2308 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2309 					   OP_ALG_AAI_HMAC_PRECOMP,
2310 			.geniv = true,
2311 		}
2312 	},
2313 	{
2314 		.aead = {
2315 			.base = {
2316 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2317 				.cra_driver_name = "authenc-hmac-sha224-"
2318 						   "cbc-des-caam-qi2",
2319 				.cra_blocksize = DES_BLOCK_SIZE,
2320 			},
2321 			.setkey = aead_setkey,
2322 			.setauthsize = aead_setauthsize,
2323 			.encrypt = aead_encrypt,
2324 			.decrypt = aead_decrypt,
2325 			.ivsize = DES_BLOCK_SIZE,
2326 			.maxauthsize = SHA224_DIGEST_SIZE,
2327 		},
2328 		.caam = {
2329 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2330 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2331 					   OP_ALG_AAI_HMAC_PRECOMP,
2332 		},
2333 	},
2334 	{
2335 		.aead = {
2336 			.base = {
2337 				.cra_name = "echainiv(authenc(hmac(sha224),"
2338 					    "cbc(des)))",
2339 				.cra_driver_name = "echainiv-authenc-"
2340 						   "hmac-sha224-cbc-des-"
2341 						   "caam-qi2",
2342 				.cra_blocksize = DES_BLOCK_SIZE,
2343 			},
2344 			.setkey = aead_setkey,
2345 			.setauthsize = aead_setauthsize,
2346 			.encrypt = aead_encrypt,
2347 			.decrypt = aead_decrypt,
2348 			.ivsize = DES_BLOCK_SIZE,
2349 			.maxauthsize = SHA224_DIGEST_SIZE,
2350 		},
2351 		.caam = {
2352 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2353 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2354 					   OP_ALG_AAI_HMAC_PRECOMP,
2355 			.geniv = true,
2356 		}
2357 	},
2358 	{
2359 		.aead = {
2360 			.base = {
2361 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2362 				.cra_driver_name = "authenc-hmac-sha256-"
2363 						   "cbc-des-caam-qi2",
2364 				.cra_blocksize = DES_BLOCK_SIZE,
2365 			},
2366 			.setkey = aead_setkey,
2367 			.setauthsize = aead_setauthsize,
2368 			.encrypt = aead_encrypt,
2369 			.decrypt = aead_decrypt,
2370 			.ivsize = DES_BLOCK_SIZE,
2371 			.maxauthsize = SHA256_DIGEST_SIZE,
2372 		},
2373 		.caam = {
2374 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2375 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2376 					   OP_ALG_AAI_HMAC_PRECOMP,
2377 		},
2378 	},
2379 	{
2380 		.aead = {
2381 			.base = {
2382 				.cra_name = "echainiv(authenc(hmac(sha256),"
2383 					    "cbc(des)))",
2384 				.cra_driver_name = "echainiv-authenc-"
2385 						   "hmac-sha256-cbc-desi-"
2386 						   "caam-qi2",
2387 				.cra_blocksize = DES_BLOCK_SIZE,
2388 			},
2389 			.setkey = aead_setkey,
2390 			.setauthsize = aead_setauthsize,
2391 			.encrypt = aead_encrypt,
2392 			.decrypt = aead_decrypt,
2393 			.ivsize = DES_BLOCK_SIZE,
2394 			.maxauthsize = SHA256_DIGEST_SIZE,
2395 		},
2396 		.caam = {
2397 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2398 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2399 					   OP_ALG_AAI_HMAC_PRECOMP,
2400 			.geniv = true,
2401 		},
2402 	},
2403 	{
2404 		.aead = {
2405 			.base = {
2406 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2407 				.cra_driver_name = "authenc-hmac-sha384-"
2408 						   "cbc-des-caam-qi2",
2409 				.cra_blocksize = DES_BLOCK_SIZE,
2410 			},
2411 			.setkey = aead_setkey,
2412 			.setauthsize = aead_setauthsize,
2413 			.encrypt = aead_encrypt,
2414 			.decrypt = aead_decrypt,
2415 			.ivsize = DES_BLOCK_SIZE,
2416 			.maxauthsize = SHA384_DIGEST_SIZE,
2417 		},
2418 		.caam = {
2419 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2420 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2421 					   OP_ALG_AAI_HMAC_PRECOMP,
2422 		},
2423 	},
2424 	{
2425 		.aead = {
2426 			.base = {
2427 				.cra_name = "echainiv(authenc(hmac(sha384),"
2428 					    "cbc(des)))",
2429 				.cra_driver_name = "echainiv-authenc-"
2430 						   "hmac-sha384-cbc-des-"
2431 						   "caam-qi2",
2432 				.cra_blocksize = DES_BLOCK_SIZE,
2433 			},
2434 			.setkey = aead_setkey,
2435 			.setauthsize = aead_setauthsize,
2436 			.encrypt = aead_encrypt,
2437 			.decrypt = aead_decrypt,
2438 			.ivsize = DES_BLOCK_SIZE,
2439 			.maxauthsize = SHA384_DIGEST_SIZE,
2440 		},
2441 		.caam = {
2442 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2443 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2444 					   OP_ALG_AAI_HMAC_PRECOMP,
2445 			.geniv = true,
2446 		}
2447 	},
2448 	{
2449 		.aead = {
2450 			.base = {
2451 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2452 				.cra_driver_name = "authenc-hmac-sha512-"
2453 						   "cbc-des-caam-qi2",
2454 				.cra_blocksize = DES_BLOCK_SIZE,
2455 			},
2456 			.setkey = aead_setkey,
2457 			.setauthsize = aead_setauthsize,
2458 			.encrypt = aead_encrypt,
2459 			.decrypt = aead_decrypt,
2460 			.ivsize = DES_BLOCK_SIZE,
2461 			.maxauthsize = SHA512_DIGEST_SIZE,
2462 		},
2463 		.caam = {
2464 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2465 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2466 					   OP_ALG_AAI_HMAC_PRECOMP,
2467 		}
2468 	},
2469 	{
2470 		.aead = {
2471 			.base = {
2472 				.cra_name = "echainiv(authenc(hmac(sha512),"
2473 					    "cbc(des)))",
2474 				.cra_driver_name = "echainiv-authenc-"
2475 						   "hmac-sha512-cbc-des-"
2476 						   "caam-qi2",
2477 				.cra_blocksize = DES_BLOCK_SIZE,
2478 			},
2479 			.setkey = aead_setkey,
2480 			.setauthsize = aead_setauthsize,
2481 			.encrypt = aead_encrypt,
2482 			.decrypt = aead_decrypt,
2483 			.ivsize = DES_BLOCK_SIZE,
2484 			.maxauthsize = SHA512_DIGEST_SIZE,
2485 		},
2486 		.caam = {
2487 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2488 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2489 					   OP_ALG_AAI_HMAC_PRECOMP,
2490 			.geniv = true,
2491 		}
2492 	},
2493 	{
2494 		.aead = {
2495 			.base = {
2496 				.cra_name = "authenc(hmac(md5),"
2497 					    "rfc3686(ctr(aes)))",
2498 				.cra_driver_name = "authenc-hmac-md5-"
2499 						   "rfc3686-ctr-aes-caam-qi2",
2500 				.cra_blocksize = 1,
2501 			},
2502 			.setkey = aead_setkey,
2503 			.setauthsize = aead_setauthsize,
2504 			.encrypt = aead_encrypt,
2505 			.decrypt = aead_decrypt,
2506 			.ivsize = CTR_RFC3686_IV_SIZE,
2507 			.maxauthsize = MD5_DIGEST_SIZE,
2508 		},
2509 		.caam = {
2510 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2511 					   OP_ALG_AAI_CTR_MOD128,
2512 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2513 					   OP_ALG_AAI_HMAC_PRECOMP,
2514 			.rfc3686 = true,
2515 		},
2516 	},
2517 	{
2518 		.aead = {
2519 			.base = {
2520 				.cra_name = "seqiv(authenc("
2521 					    "hmac(md5),rfc3686(ctr(aes))))",
2522 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2523 						   "rfc3686-ctr-aes-caam-qi2",
2524 				.cra_blocksize = 1,
2525 			},
2526 			.setkey = aead_setkey,
2527 			.setauthsize = aead_setauthsize,
2528 			.encrypt = aead_encrypt,
2529 			.decrypt = aead_decrypt,
2530 			.ivsize = CTR_RFC3686_IV_SIZE,
2531 			.maxauthsize = MD5_DIGEST_SIZE,
2532 		},
2533 		.caam = {
2534 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2535 					   OP_ALG_AAI_CTR_MOD128,
2536 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2537 					   OP_ALG_AAI_HMAC_PRECOMP,
2538 			.rfc3686 = true,
2539 			.geniv = true,
2540 		},
2541 	},
2542 	{
2543 		.aead = {
2544 			.base = {
2545 				.cra_name = "authenc(hmac(sha1),"
2546 					    "rfc3686(ctr(aes)))",
2547 				.cra_driver_name = "authenc-hmac-sha1-"
2548 						   "rfc3686-ctr-aes-caam-qi2",
2549 				.cra_blocksize = 1,
2550 			},
2551 			.setkey = aead_setkey,
2552 			.setauthsize = aead_setauthsize,
2553 			.encrypt = aead_encrypt,
2554 			.decrypt = aead_decrypt,
2555 			.ivsize = CTR_RFC3686_IV_SIZE,
2556 			.maxauthsize = SHA1_DIGEST_SIZE,
2557 		},
2558 		.caam = {
2559 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2560 					   OP_ALG_AAI_CTR_MOD128,
2561 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2562 					   OP_ALG_AAI_HMAC_PRECOMP,
2563 			.rfc3686 = true,
2564 		},
2565 	},
2566 	{
2567 		.aead = {
2568 			.base = {
2569 				.cra_name = "seqiv(authenc("
2570 					    "hmac(sha1),rfc3686(ctr(aes))))",
2571 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2572 						   "rfc3686-ctr-aes-caam-qi2",
2573 				.cra_blocksize = 1,
2574 			},
2575 			.setkey = aead_setkey,
2576 			.setauthsize = aead_setauthsize,
2577 			.encrypt = aead_encrypt,
2578 			.decrypt = aead_decrypt,
2579 			.ivsize = CTR_RFC3686_IV_SIZE,
2580 			.maxauthsize = SHA1_DIGEST_SIZE,
2581 		},
2582 		.caam = {
2583 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2584 					   OP_ALG_AAI_CTR_MOD128,
2585 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2586 					   OP_ALG_AAI_HMAC_PRECOMP,
2587 			.rfc3686 = true,
2588 			.geniv = true,
2589 		},
2590 	},
2591 	{
2592 		.aead = {
2593 			.base = {
2594 				.cra_name = "authenc(hmac(sha224),"
2595 					    "rfc3686(ctr(aes)))",
2596 				.cra_driver_name = "authenc-hmac-sha224-"
2597 						   "rfc3686-ctr-aes-caam-qi2",
2598 				.cra_blocksize = 1,
2599 			},
2600 			.setkey = aead_setkey,
2601 			.setauthsize = aead_setauthsize,
2602 			.encrypt = aead_encrypt,
2603 			.decrypt = aead_decrypt,
2604 			.ivsize = CTR_RFC3686_IV_SIZE,
2605 			.maxauthsize = SHA224_DIGEST_SIZE,
2606 		},
2607 		.caam = {
2608 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2609 					   OP_ALG_AAI_CTR_MOD128,
2610 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2611 					   OP_ALG_AAI_HMAC_PRECOMP,
2612 			.rfc3686 = true,
2613 		},
2614 	},
2615 	{
2616 		.aead = {
2617 			.base = {
2618 				.cra_name = "seqiv(authenc("
2619 					    "hmac(sha224),rfc3686(ctr(aes))))",
2620 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2621 						   "rfc3686-ctr-aes-caam-qi2",
2622 				.cra_blocksize = 1,
2623 			},
2624 			.setkey = aead_setkey,
2625 			.setauthsize = aead_setauthsize,
2626 			.encrypt = aead_encrypt,
2627 			.decrypt = aead_decrypt,
2628 			.ivsize = CTR_RFC3686_IV_SIZE,
2629 			.maxauthsize = SHA224_DIGEST_SIZE,
2630 		},
2631 		.caam = {
2632 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2633 					   OP_ALG_AAI_CTR_MOD128,
2634 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2635 					   OP_ALG_AAI_HMAC_PRECOMP,
2636 			.rfc3686 = true,
2637 			.geniv = true,
2638 		},
2639 	},
2640 	{
2641 		.aead = {
2642 			.base = {
2643 				.cra_name = "authenc(hmac(sha256),"
2644 					    "rfc3686(ctr(aes)))",
2645 				.cra_driver_name = "authenc-hmac-sha256-"
2646 						   "rfc3686-ctr-aes-caam-qi2",
2647 				.cra_blocksize = 1,
2648 			},
2649 			.setkey = aead_setkey,
2650 			.setauthsize = aead_setauthsize,
2651 			.encrypt = aead_encrypt,
2652 			.decrypt = aead_decrypt,
2653 			.ivsize = CTR_RFC3686_IV_SIZE,
2654 			.maxauthsize = SHA256_DIGEST_SIZE,
2655 		},
2656 		.caam = {
2657 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2658 					   OP_ALG_AAI_CTR_MOD128,
2659 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2660 					   OP_ALG_AAI_HMAC_PRECOMP,
2661 			.rfc3686 = true,
2662 		},
2663 	},
2664 	{
2665 		.aead = {
2666 			.base = {
2667 				.cra_name = "seqiv(authenc(hmac(sha256),"
2668 					    "rfc3686(ctr(aes))))",
2669 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2670 						   "rfc3686-ctr-aes-caam-qi2",
2671 				.cra_blocksize = 1,
2672 			},
2673 			.setkey = aead_setkey,
2674 			.setauthsize = aead_setauthsize,
2675 			.encrypt = aead_encrypt,
2676 			.decrypt = aead_decrypt,
2677 			.ivsize = CTR_RFC3686_IV_SIZE,
2678 			.maxauthsize = SHA256_DIGEST_SIZE,
2679 		},
2680 		.caam = {
2681 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2682 					   OP_ALG_AAI_CTR_MOD128,
2683 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2684 					   OP_ALG_AAI_HMAC_PRECOMP,
2685 			.rfc3686 = true,
2686 			.geniv = true,
2687 		},
2688 	},
2689 	{
2690 		.aead = {
2691 			.base = {
2692 				.cra_name = "authenc(hmac(sha384),"
2693 					    "rfc3686(ctr(aes)))",
2694 				.cra_driver_name = "authenc-hmac-sha384-"
2695 						   "rfc3686-ctr-aes-caam-qi2",
2696 				.cra_blocksize = 1,
2697 			},
2698 			.setkey = aead_setkey,
2699 			.setauthsize = aead_setauthsize,
2700 			.encrypt = aead_encrypt,
2701 			.decrypt = aead_decrypt,
2702 			.ivsize = CTR_RFC3686_IV_SIZE,
2703 			.maxauthsize = SHA384_DIGEST_SIZE,
2704 		},
2705 		.caam = {
2706 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2707 					   OP_ALG_AAI_CTR_MOD128,
2708 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2709 					   OP_ALG_AAI_HMAC_PRECOMP,
2710 			.rfc3686 = true,
2711 		},
2712 	},
2713 	{
2714 		.aead = {
2715 			.base = {
2716 				.cra_name = "seqiv(authenc(hmac(sha384),"
2717 					    "rfc3686(ctr(aes))))",
2718 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2719 						   "rfc3686-ctr-aes-caam-qi2",
2720 				.cra_blocksize = 1,
2721 			},
2722 			.setkey = aead_setkey,
2723 			.setauthsize = aead_setauthsize,
2724 			.encrypt = aead_encrypt,
2725 			.decrypt = aead_decrypt,
2726 			.ivsize = CTR_RFC3686_IV_SIZE,
2727 			.maxauthsize = SHA384_DIGEST_SIZE,
2728 		},
2729 		.caam = {
2730 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2731 					   OP_ALG_AAI_CTR_MOD128,
2732 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2733 					   OP_ALG_AAI_HMAC_PRECOMP,
2734 			.rfc3686 = true,
2735 			.geniv = true,
2736 		},
2737 	},
2738 	{
2739 		.aead = {
2740 			.base = {
2741 				.cra_name = "rfc7539(chacha20,poly1305)",
2742 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2743 						   "caam-qi2",
2744 				.cra_blocksize = 1,
2745 			},
2746 			.setkey = chachapoly_setkey,
2747 			.setauthsize = chachapoly_setauthsize,
2748 			.encrypt = aead_encrypt,
2749 			.decrypt = aead_decrypt,
2750 			.ivsize = CHACHAPOLY_IV_SIZE,
2751 			.maxauthsize = POLY1305_DIGEST_SIZE,
2752 		},
2753 		.caam = {
2754 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2755 					   OP_ALG_AAI_AEAD,
2756 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2757 					   OP_ALG_AAI_AEAD,
2758 		},
2759 	},
2760 	{
2761 		.aead = {
2762 			.base = {
2763 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2764 				.cra_driver_name = "rfc7539esp-chacha20-"
2765 						   "poly1305-caam-qi2",
2766 				.cra_blocksize = 1,
2767 			},
2768 			.setkey = chachapoly_setkey,
2769 			.setauthsize = chachapoly_setauthsize,
2770 			.encrypt = aead_encrypt,
2771 			.decrypt = aead_decrypt,
2772 			.ivsize = 8,
2773 			.maxauthsize = POLY1305_DIGEST_SIZE,
2774 		},
2775 		.caam = {
2776 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2777 					   OP_ALG_AAI_AEAD,
2778 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2779 					   OP_ALG_AAI_AEAD,
2780 		},
2781 	},
2782 	{
2783 		.aead = {
2784 			.base = {
2785 				.cra_name = "authenc(hmac(sha512),"
2786 					    "rfc3686(ctr(aes)))",
2787 				.cra_driver_name = "authenc-hmac-sha512-"
2788 						   "rfc3686-ctr-aes-caam-qi2",
2789 				.cra_blocksize = 1,
2790 			},
2791 			.setkey = aead_setkey,
2792 			.setauthsize = aead_setauthsize,
2793 			.encrypt = aead_encrypt,
2794 			.decrypt = aead_decrypt,
2795 			.ivsize = CTR_RFC3686_IV_SIZE,
2796 			.maxauthsize = SHA512_DIGEST_SIZE,
2797 		},
2798 		.caam = {
2799 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2800 					   OP_ALG_AAI_CTR_MOD128,
2801 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2802 					   OP_ALG_AAI_HMAC_PRECOMP,
2803 			.rfc3686 = true,
2804 		},
2805 	},
2806 	{
2807 		.aead = {
2808 			.base = {
2809 				.cra_name = "seqiv(authenc(hmac(sha512),"
2810 					    "rfc3686(ctr(aes))))",
2811 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2812 						   "rfc3686-ctr-aes-caam-qi2",
2813 				.cra_blocksize = 1,
2814 			},
2815 			.setkey = aead_setkey,
2816 			.setauthsize = aead_setauthsize,
2817 			.encrypt = aead_encrypt,
2818 			.decrypt = aead_decrypt,
2819 			.ivsize = CTR_RFC3686_IV_SIZE,
2820 			.maxauthsize = SHA512_DIGEST_SIZE,
2821 		},
2822 		.caam = {
2823 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2824 					   OP_ALG_AAI_CTR_MOD128,
2825 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2826 					   OP_ALG_AAI_HMAC_PRECOMP,
2827 			.rfc3686 = true,
2828 			.geniv = true,
2829 		},
2830 	},
2831 };
2832 
2833 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2834 {
2835 	struct skcipher_alg *alg = &t_alg->skcipher;
2836 
2837 	alg->base.cra_module = THIS_MODULE;
2838 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2839 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2840 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2841 
2842 	alg->init = caam_cra_init_skcipher;
2843 	alg->exit = caam_cra_exit;
2844 }
2845 
2846 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2847 {
2848 	struct aead_alg *alg = &t_alg->aead;
2849 
2850 	alg->base.cra_module = THIS_MODULE;
2851 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2852 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2853 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2854 
2855 	alg->init = caam_cra_init_aead;
2856 	alg->exit = caam_cra_exit_aead;
2857 }
2858 
2859 /* max hash key is max split key size */
2860 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2861 
2862 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2863 
2864 /* caam context sizes for hashes: running digest + 8 */
2865 #define HASH_MSG_LEN			8
2866 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2867 
2868 enum hash_optype {
2869 	UPDATE = 0,
2870 	UPDATE_FIRST,
2871 	FINALIZE,
2872 	DIGEST,
2873 	HASH_NUM_OP
2874 };
2875 
2876 /**
2877  * caam_hash_ctx - ahash per-session context
2878  * @flc: Flow Contexts array
2879  * @flc_dma: I/O virtual addresses of the Flow Contexts
2880  * @dev: dpseci device
2881  * @ctx_len: size of Context Register
2882  * @adata: hashing algorithm details
2883  */
2884 struct caam_hash_ctx {
2885 	struct caam_flc flc[HASH_NUM_OP];
2886 	dma_addr_t flc_dma[HASH_NUM_OP];
2887 	struct device *dev;
2888 	int ctx_len;
2889 	struct alginfo adata;
2890 };
2891 
2892 /* ahash state */
2893 struct caam_hash_state {
2894 	struct caam_request caam_req;
2895 	dma_addr_t buf_dma;
2896 	dma_addr_t ctx_dma;
2897 	int ctx_dma_len;
2898 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2899 	int buflen_0;
2900 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2901 	int buflen_1;
2902 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2903 	int (*update)(struct ahash_request *req);
2904 	int (*final)(struct ahash_request *req);
2905 	int (*finup)(struct ahash_request *req);
2906 	int current_buf;
2907 };
2908 
2909 struct caam_export_state {
2910 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2911 	u8 caam_ctx[MAX_CTX_LEN];
2912 	int buflen;
2913 	int (*update)(struct ahash_request *req);
2914 	int (*final)(struct ahash_request *req);
2915 	int (*finup)(struct ahash_request *req);
2916 };
2917 
2918 static inline void switch_buf(struct caam_hash_state *state)
2919 {
2920 	state->current_buf ^= 1;
2921 }
2922 
2923 static inline u8 *current_buf(struct caam_hash_state *state)
2924 {
2925 	return state->current_buf ? state->buf_1 : state->buf_0;
2926 }
2927 
2928 static inline u8 *alt_buf(struct caam_hash_state *state)
2929 {
2930 	return state->current_buf ? state->buf_0 : state->buf_1;
2931 }
2932 
2933 static inline int *current_buflen(struct caam_hash_state *state)
2934 {
2935 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2936 }
2937 
2938 static inline int *alt_buflen(struct caam_hash_state *state)
2939 {
2940 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2941 }
2942 
2943 /* Map current buffer in state (if length > 0) and put it in link table */
2944 static inline int buf_map_to_qm_sg(struct device *dev,
2945 				   struct dpaa2_sg_entry *qm_sg,
2946 				   struct caam_hash_state *state)
2947 {
2948 	int buflen = *current_buflen(state);
2949 
2950 	if (!buflen)
2951 		return 0;
2952 
2953 	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2954 					DMA_TO_DEVICE);
2955 	if (dma_mapping_error(dev, state->buf_dma)) {
2956 		dev_err(dev, "unable to map buf\n");
2957 		state->buf_dma = 0;
2958 		return -ENOMEM;
2959 	}
2960 
2961 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2962 
2963 	return 0;
2964 }
2965 
2966 /* Map state->caam_ctx, and add it to link table */
2967 static inline int ctx_map_to_qm_sg(struct device *dev,
2968 				   struct caam_hash_state *state, int ctx_len,
2969 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
2970 {
2971 	state->ctx_dma_len = ctx_len;
2972 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2973 	if (dma_mapping_error(dev, state->ctx_dma)) {
2974 		dev_err(dev, "unable to map ctx\n");
2975 		state->ctx_dma = 0;
2976 		return -ENOMEM;
2977 	}
2978 
2979 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2980 
2981 	return 0;
2982 }
2983 
2984 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2985 {
2986 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2987 	int digestsize = crypto_ahash_digestsize(ahash);
2988 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2989 	struct caam_flc *flc;
2990 	u32 *desc;
2991 
2992 	/* ahash_update shared descriptor */
2993 	flc = &ctx->flc[UPDATE];
2994 	desc = flc->sh_desc;
2995 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2996 			  ctx->ctx_len, true, priv->sec_attr.era);
2997 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2998 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2999 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3000 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3001 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3002 			     1);
3003 
3004 	/* ahash_update_first shared descriptor */
3005 	flc = &ctx->flc[UPDATE_FIRST];
3006 	desc = flc->sh_desc;
3007 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3008 			  ctx->ctx_len, false, priv->sec_attr.era);
3009 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3010 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3011 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3012 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3013 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3014 			     1);
3015 
3016 	/* ahash_final shared descriptor */
3017 	flc = &ctx->flc[FINALIZE];
3018 	desc = flc->sh_desc;
3019 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3020 			  ctx->ctx_len, true, priv->sec_attr.era);
3021 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3022 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3023 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3024 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3025 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3026 			     1);
3027 
3028 	/* ahash_digest shared descriptor */
3029 	flc = &ctx->flc[DIGEST];
3030 	desc = flc->sh_desc;
3031 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3032 			  ctx->ctx_len, false, priv->sec_attr.era);
3033 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3034 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3035 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3036 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3037 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3038 			     1);
3039 
3040 	return 0;
3041 }
3042 
3043 struct split_key_sh_result {
3044 	struct completion completion;
3045 	int err;
3046 	struct device *dev;
3047 };
3048 
3049 static void split_key_sh_done(void *cbk_ctx, u32 err)
3050 {
3051 	struct split_key_sh_result *res = cbk_ctx;
3052 
3053 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3054 
3055 	if (err)
3056 		caam_qi2_strstatus(res->dev, err);
3057 
3058 	res->err = err;
3059 	complete(&res->completion);
3060 }
3061 
3062 /* Digest hash size if it is too large */
3063 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3064 			   u32 digestsize)
3065 {
3066 	struct caam_request *req_ctx;
3067 	u32 *desc;
3068 	struct split_key_sh_result result;
3069 	dma_addr_t key_dma;
3070 	struct caam_flc *flc;
3071 	dma_addr_t flc_dma;
3072 	int ret = -ENOMEM;
3073 	struct dpaa2_fl_entry *in_fle, *out_fle;
3074 
3075 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3076 	if (!req_ctx)
3077 		return -ENOMEM;
3078 
3079 	in_fle = &req_ctx->fd_flt[1];
3080 	out_fle = &req_ctx->fd_flt[0];
3081 
3082 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3083 	if (!flc)
3084 		goto err_flc;
3085 
3086 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3087 	if (dma_mapping_error(ctx->dev, key_dma)) {
3088 		dev_err(ctx->dev, "unable to map key memory\n");
3089 		goto err_key_dma;
3090 	}
3091 
3092 	desc = flc->sh_desc;
3093 
3094 	init_sh_desc(desc, 0);
3095 
3096 	/* descriptor to perform unkeyed hash on key_in */
3097 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3098 			 OP_ALG_AS_INITFINAL);
3099 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3100 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3101 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3102 			 LDST_SRCDST_BYTE_CONTEXT);
3103 
3104 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3105 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3106 				 desc_bytes(desc), DMA_TO_DEVICE);
3107 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3108 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3109 		goto err_flc_dma;
3110 	}
3111 
3112 	dpaa2_fl_set_final(in_fle, true);
3113 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3114 	dpaa2_fl_set_addr(in_fle, key_dma);
3115 	dpaa2_fl_set_len(in_fle, *keylen);
3116 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3117 	dpaa2_fl_set_addr(out_fle, key_dma);
3118 	dpaa2_fl_set_len(out_fle, digestsize);
3119 
3120 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3121 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3122 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3123 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3124 			     1);
3125 
3126 	result.err = 0;
3127 	init_completion(&result.completion);
3128 	result.dev = ctx->dev;
3129 
3130 	req_ctx->flc = flc;
3131 	req_ctx->flc_dma = flc_dma;
3132 	req_ctx->cbk = split_key_sh_done;
3133 	req_ctx->ctx = &result;
3134 
3135 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3136 	if (ret == -EINPROGRESS) {
3137 		/* in progress */
3138 		wait_for_completion(&result.completion);
3139 		ret = result.err;
3140 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3141 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3142 				     digestsize, 1);
3143 	}
3144 
3145 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3146 			 DMA_TO_DEVICE);
3147 err_flc_dma:
3148 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3149 err_key_dma:
3150 	kfree(flc);
3151 err_flc:
3152 	kfree(req_ctx);
3153 
3154 	*keylen = digestsize;
3155 
3156 	return ret;
3157 }
3158 
3159 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3160 			unsigned int keylen)
3161 {
3162 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3163 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3164 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3165 	int ret;
3166 	u8 *hashed_key = NULL;
3167 
3168 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3169 
3170 	if (keylen > blocksize) {
3171 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3172 		if (!hashed_key)
3173 			return -ENOMEM;
3174 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3175 		if (ret)
3176 			goto bad_free_key;
3177 		key = hashed_key;
3178 	}
3179 
3180 	ctx->adata.keylen = keylen;
3181 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3182 					      OP_ALG_ALGSEL_MASK);
3183 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3184 		goto bad_free_key;
3185 
3186 	ctx->adata.key_virt = key;
3187 	ctx->adata.key_inline = true;
3188 
3189 	ret = ahash_set_sh_desc(ahash);
3190 	kfree(hashed_key);
3191 	return ret;
3192 bad_free_key:
3193 	kfree(hashed_key);
3194 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3195 	return -EINVAL;
3196 }
3197 
3198 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3199 			       struct ahash_request *req)
3200 {
3201 	struct caam_hash_state *state = ahash_request_ctx(req);
3202 
3203 	if (edesc->src_nents)
3204 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3205 
3206 	if (edesc->qm_sg_bytes)
3207 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3208 				 DMA_TO_DEVICE);
3209 
3210 	if (state->buf_dma) {
3211 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3212 				 DMA_TO_DEVICE);
3213 		state->buf_dma = 0;
3214 	}
3215 }
3216 
3217 static inline void ahash_unmap_ctx(struct device *dev,
3218 				   struct ahash_edesc *edesc,
3219 				   struct ahash_request *req, u32 flag)
3220 {
3221 	struct caam_hash_state *state = ahash_request_ctx(req);
3222 
3223 	if (state->ctx_dma) {
3224 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3225 		state->ctx_dma = 0;
3226 	}
3227 	ahash_unmap(dev, edesc, req);
3228 }
3229 
3230 static void ahash_done(void *cbk_ctx, u32 status)
3231 {
3232 	struct crypto_async_request *areq = cbk_ctx;
3233 	struct ahash_request *req = ahash_request_cast(areq);
3234 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3235 	struct caam_hash_state *state = ahash_request_ctx(req);
3236 	struct ahash_edesc *edesc = state->caam_req.edesc;
3237 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3238 	int digestsize = crypto_ahash_digestsize(ahash);
3239 	int ecode = 0;
3240 
3241 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3242 
3243 	if (unlikely(status)) {
3244 		caam_qi2_strstatus(ctx->dev, status);
3245 		ecode = -EIO;
3246 	}
3247 
3248 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3249 	memcpy(req->result, state->caam_ctx, digestsize);
3250 	qi_cache_free(edesc);
3251 
3252 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3253 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3254 			     ctx->ctx_len, 1);
3255 
3256 	req->base.complete(&req->base, ecode);
3257 }
3258 
3259 static void ahash_done_bi(void *cbk_ctx, u32 status)
3260 {
3261 	struct crypto_async_request *areq = cbk_ctx;
3262 	struct ahash_request *req = ahash_request_cast(areq);
3263 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3264 	struct caam_hash_state *state = ahash_request_ctx(req);
3265 	struct ahash_edesc *edesc = state->caam_req.edesc;
3266 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3267 	int ecode = 0;
3268 
3269 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3270 
3271 	if (unlikely(status)) {
3272 		caam_qi2_strstatus(ctx->dev, status);
3273 		ecode = -EIO;
3274 	}
3275 
3276 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3277 	switch_buf(state);
3278 	qi_cache_free(edesc);
3279 
3280 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3281 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3282 			     ctx->ctx_len, 1);
3283 	if (req->result)
3284 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3285 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3286 				     crypto_ahash_digestsize(ahash), 1);
3287 
3288 	req->base.complete(&req->base, ecode);
3289 }
3290 
3291 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3292 {
3293 	struct crypto_async_request *areq = cbk_ctx;
3294 	struct ahash_request *req = ahash_request_cast(areq);
3295 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3296 	struct caam_hash_state *state = ahash_request_ctx(req);
3297 	struct ahash_edesc *edesc = state->caam_req.edesc;
3298 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3299 	int digestsize = crypto_ahash_digestsize(ahash);
3300 	int ecode = 0;
3301 
3302 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3303 
3304 	if (unlikely(status)) {
3305 		caam_qi2_strstatus(ctx->dev, status);
3306 		ecode = -EIO;
3307 	}
3308 
3309 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3310 	memcpy(req->result, state->caam_ctx, digestsize);
3311 	qi_cache_free(edesc);
3312 
3313 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3314 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3315 			     ctx->ctx_len, 1);
3316 
3317 	req->base.complete(&req->base, ecode);
3318 }
3319 
3320 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3321 {
3322 	struct crypto_async_request *areq = cbk_ctx;
3323 	struct ahash_request *req = ahash_request_cast(areq);
3324 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3325 	struct caam_hash_state *state = ahash_request_ctx(req);
3326 	struct ahash_edesc *edesc = state->caam_req.edesc;
3327 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3328 	int ecode = 0;
3329 
3330 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3331 
3332 	if (unlikely(status)) {
3333 		caam_qi2_strstatus(ctx->dev, status);
3334 		ecode = -EIO;
3335 	}
3336 
3337 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3338 	switch_buf(state);
3339 	qi_cache_free(edesc);
3340 
3341 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3342 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3343 			     ctx->ctx_len, 1);
3344 	if (req->result)
3345 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3346 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3347 				     crypto_ahash_digestsize(ahash), 1);
3348 
3349 	req->base.complete(&req->base, ecode);
3350 }
3351 
3352 static int ahash_update_ctx(struct ahash_request *req)
3353 {
3354 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3355 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3356 	struct caam_hash_state *state = ahash_request_ctx(req);
3357 	struct caam_request *req_ctx = &state->caam_req;
3358 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3359 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3360 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3361 		      GFP_KERNEL : GFP_ATOMIC;
3362 	u8 *buf = current_buf(state);
3363 	int *buflen = current_buflen(state);
3364 	u8 *next_buf = alt_buf(state);
3365 	int *next_buflen = alt_buflen(state), last_buflen;
3366 	int in_len = *buflen + req->nbytes, to_hash;
3367 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3368 	struct ahash_edesc *edesc;
3369 	int ret = 0;
3370 
3371 	last_buflen = *next_buflen;
3372 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3373 	to_hash = in_len - *next_buflen;
3374 
3375 	if (to_hash) {
3376 		struct dpaa2_sg_entry *sg_table;
3377 
3378 		src_nents = sg_nents_for_len(req->src,
3379 					     req->nbytes - (*next_buflen));
3380 		if (src_nents < 0) {
3381 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3382 			return src_nents;
3383 		}
3384 
3385 		if (src_nents) {
3386 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3387 						  DMA_TO_DEVICE);
3388 			if (!mapped_nents) {
3389 				dev_err(ctx->dev, "unable to DMA map source\n");
3390 				return -ENOMEM;
3391 			}
3392 		} else {
3393 			mapped_nents = 0;
3394 		}
3395 
3396 		/* allocate space for base edesc and link tables */
3397 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3398 		if (!edesc) {
3399 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3400 				     DMA_TO_DEVICE);
3401 			return -ENOMEM;
3402 		}
3403 
3404 		edesc->src_nents = src_nents;
3405 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3406 		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3407 			      sizeof(*sg_table);
3408 		sg_table = &edesc->sgt[0];
3409 
3410 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3411 				       DMA_BIDIRECTIONAL);
3412 		if (ret)
3413 			goto unmap_ctx;
3414 
3415 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3416 		if (ret)
3417 			goto unmap_ctx;
3418 
3419 		if (mapped_nents) {
3420 			sg_to_qm_sg_last(req->src, mapped_nents,
3421 					 sg_table + qm_sg_src_index, 0);
3422 			if (*next_buflen)
3423 				scatterwalk_map_and_copy(next_buf, req->src,
3424 							 to_hash - *buflen,
3425 							 *next_buflen, 0);
3426 		} else {
3427 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3428 					   true);
3429 		}
3430 
3431 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3432 						  qm_sg_bytes, DMA_TO_DEVICE);
3433 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3434 			dev_err(ctx->dev, "unable to map S/G table\n");
3435 			ret = -ENOMEM;
3436 			goto unmap_ctx;
3437 		}
3438 		edesc->qm_sg_bytes = qm_sg_bytes;
3439 
3440 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3441 		dpaa2_fl_set_final(in_fle, true);
3442 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3443 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3444 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3445 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3446 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3447 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3448 
3449 		req_ctx->flc = &ctx->flc[UPDATE];
3450 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3451 		req_ctx->cbk = ahash_done_bi;
3452 		req_ctx->ctx = &req->base;
3453 		req_ctx->edesc = edesc;
3454 
3455 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3456 		if (ret != -EINPROGRESS &&
3457 		    !(ret == -EBUSY &&
3458 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3459 			goto unmap_ctx;
3460 	} else if (*next_buflen) {
3461 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3462 					 req->nbytes, 0);
3463 		*buflen = *next_buflen;
3464 		*next_buflen = last_buflen;
3465 	}
3466 
3467 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3468 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3469 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3470 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3471 			     1);
3472 
3473 	return ret;
3474 unmap_ctx:
3475 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3476 	qi_cache_free(edesc);
3477 	return ret;
3478 }
3479 
3480 static int ahash_final_ctx(struct ahash_request *req)
3481 {
3482 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3483 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3484 	struct caam_hash_state *state = ahash_request_ctx(req);
3485 	struct caam_request *req_ctx = &state->caam_req;
3486 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3487 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3488 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3489 		      GFP_KERNEL : GFP_ATOMIC;
3490 	int buflen = *current_buflen(state);
3491 	int qm_sg_bytes, qm_sg_src_index;
3492 	int digestsize = crypto_ahash_digestsize(ahash);
3493 	struct ahash_edesc *edesc;
3494 	struct dpaa2_sg_entry *sg_table;
3495 	int ret;
3496 
3497 	/* allocate space for base edesc and link tables */
3498 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3499 	if (!edesc)
3500 		return -ENOMEM;
3501 
3502 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3503 	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3504 	sg_table = &edesc->sgt[0];
3505 
3506 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3507 			       DMA_BIDIRECTIONAL);
3508 	if (ret)
3509 		goto unmap_ctx;
3510 
3511 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3512 	if (ret)
3513 		goto unmap_ctx;
3514 
3515 	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3516 
3517 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3518 					  DMA_TO_DEVICE);
3519 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3520 		dev_err(ctx->dev, "unable to map S/G table\n");
3521 		ret = -ENOMEM;
3522 		goto unmap_ctx;
3523 	}
3524 	edesc->qm_sg_bytes = qm_sg_bytes;
3525 
3526 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3527 	dpaa2_fl_set_final(in_fle, true);
3528 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3529 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3530 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3531 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3532 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3533 	dpaa2_fl_set_len(out_fle, digestsize);
3534 
3535 	req_ctx->flc = &ctx->flc[FINALIZE];
3536 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3537 	req_ctx->cbk = ahash_done_ctx_src;
3538 	req_ctx->ctx = &req->base;
3539 	req_ctx->edesc = edesc;
3540 
3541 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3542 	if (ret == -EINPROGRESS ||
3543 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3544 		return ret;
3545 
3546 unmap_ctx:
3547 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3548 	qi_cache_free(edesc);
3549 	return ret;
3550 }
3551 
3552 static int ahash_finup_ctx(struct ahash_request *req)
3553 {
3554 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3555 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3556 	struct caam_hash_state *state = ahash_request_ctx(req);
3557 	struct caam_request *req_ctx = &state->caam_req;
3558 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3559 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3560 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3561 		      GFP_KERNEL : GFP_ATOMIC;
3562 	int buflen = *current_buflen(state);
3563 	int qm_sg_bytes, qm_sg_src_index;
3564 	int src_nents, mapped_nents;
3565 	int digestsize = crypto_ahash_digestsize(ahash);
3566 	struct ahash_edesc *edesc;
3567 	struct dpaa2_sg_entry *sg_table;
3568 	int ret;
3569 
3570 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3571 	if (src_nents < 0) {
3572 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3573 		return src_nents;
3574 	}
3575 
3576 	if (src_nents) {
3577 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3578 					  DMA_TO_DEVICE);
3579 		if (!mapped_nents) {
3580 			dev_err(ctx->dev, "unable to DMA map source\n");
3581 			return -ENOMEM;
3582 		}
3583 	} else {
3584 		mapped_nents = 0;
3585 	}
3586 
3587 	/* allocate space for base edesc and link tables */
3588 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3589 	if (!edesc) {
3590 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3591 		return -ENOMEM;
3592 	}
3593 
3594 	edesc->src_nents = src_nents;
3595 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3596 	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3597 	sg_table = &edesc->sgt[0];
3598 
3599 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3600 			       DMA_BIDIRECTIONAL);
3601 	if (ret)
3602 		goto unmap_ctx;
3603 
3604 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3605 	if (ret)
3606 		goto unmap_ctx;
3607 
3608 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3609 
3610 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3611 					  DMA_TO_DEVICE);
3612 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3613 		dev_err(ctx->dev, "unable to map S/G table\n");
3614 		ret = -ENOMEM;
3615 		goto unmap_ctx;
3616 	}
3617 	edesc->qm_sg_bytes = qm_sg_bytes;
3618 
3619 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3620 	dpaa2_fl_set_final(in_fle, true);
3621 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3622 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3623 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3624 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3625 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3626 	dpaa2_fl_set_len(out_fle, digestsize);
3627 
3628 	req_ctx->flc = &ctx->flc[FINALIZE];
3629 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3630 	req_ctx->cbk = ahash_done_ctx_src;
3631 	req_ctx->ctx = &req->base;
3632 	req_ctx->edesc = edesc;
3633 
3634 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3635 	if (ret == -EINPROGRESS ||
3636 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3637 		return ret;
3638 
3639 unmap_ctx:
3640 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3641 	qi_cache_free(edesc);
3642 	return ret;
3643 }
3644 
3645 static int ahash_digest(struct ahash_request *req)
3646 {
3647 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3648 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3649 	struct caam_hash_state *state = ahash_request_ctx(req);
3650 	struct caam_request *req_ctx = &state->caam_req;
3651 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3652 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3653 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3654 		      GFP_KERNEL : GFP_ATOMIC;
3655 	int digestsize = crypto_ahash_digestsize(ahash);
3656 	int src_nents, mapped_nents;
3657 	struct ahash_edesc *edesc;
3658 	int ret = -ENOMEM;
3659 
3660 	state->buf_dma = 0;
3661 
3662 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3663 	if (src_nents < 0) {
3664 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3665 		return src_nents;
3666 	}
3667 
3668 	if (src_nents) {
3669 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3670 					  DMA_TO_DEVICE);
3671 		if (!mapped_nents) {
3672 			dev_err(ctx->dev, "unable to map source for DMA\n");
3673 			return ret;
3674 		}
3675 	} else {
3676 		mapped_nents = 0;
3677 	}
3678 
3679 	/* allocate space for base edesc and link tables */
3680 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3681 	if (!edesc) {
3682 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3683 		return ret;
3684 	}
3685 
3686 	edesc->src_nents = src_nents;
3687 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3688 
3689 	if (mapped_nents > 1) {
3690 		int qm_sg_bytes;
3691 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3692 
3693 		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3694 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3695 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3696 						  qm_sg_bytes, DMA_TO_DEVICE);
3697 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3698 			dev_err(ctx->dev, "unable to map S/G table\n");
3699 			goto unmap;
3700 		}
3701 		edesc->qm_sg_bytes = qm_sg_bytes;
3702 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3703 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3704 	} else {
3705 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3706 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3707 	}
3708 
3709 	state->ctx_dma_len = digestsize;
3710 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3711 					DMA_FROM_DEVICE);
3712 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3713 		dev_err(ctx->dev, "unable to map ctx\n");
3714 		state->ctx_dma = 0;
3715 		goto unmap;
3716 	}
3717 
3718 	dpaa2_fl_set_final(in_fle, true);
3719 	dpaa2_fl_set_len(in_fle, req->nbytes);
3720 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3721 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3722 	dpaa2_fl_set_len(out_fle, digestsize);
3723 
3724 	req_ctx->flc = &ctx->flc[DIGEST];
3725 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3726 	req_ctx->cbk = ahash_done;
3727 	req_ctx->ctx = &req->base;
3728 	req_ctx->edesc = edesc;
3729 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3730 	if (ret == -EINPROGRESS ||
3731 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3732 		return ret;
3733 
3734 unmap:
3735 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3736 	qi_cache_free(edesc);
3737 	return ret;
3738 }
3739 
3740 static int ahash_final_no_ctx(struct ahash_request *req)
3741 {
3742 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3743 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3744 	struct caam_hash_state *state = ahash_request_ctx(req);
3745 	struct caam_request *req_ctx = &state->caam_req;
3746 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3747 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3748 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3749 		      GFP_KERNEL : GFP_ATOMIC;
3750 	u8 *buf = current_buf(state);
3751 	int buflen = *current_buflen(state);
3752 	int digestsize = crypto_ahash_digestsize(ahash);
3753 	struct ahash_edesc *edesc;
3754 	int ret = -ENOMEM;
3755 
3756 	/* allocate space for base edesc and link tables */
3757 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3758 	if (!edesc)
3759 		return ret;
3760 
3761 	if (buflen) {
3762 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3763 						DMA_TO_DEVICE);
3764 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3765 			dev_err(ctx->dev, "unable to map src\n");
3766 			goto unmap;
3767 		}
3768 	}
3769 
3770 	state->ctx_dma_len = digestsize;
3771 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3772 					DMA_FROM_DEVICE);
3773 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3774 		dev_err(ctx->dev, "unable to map ctx\n");
3775 		state->ctx_dma = 0;
3776 		goto unmap;
3777 	}
3778 
3779 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3780 	dpaa2_fl_set_final(in_fle, true);
3781 	/*
3782 	 * crypto engine requires the input entry to be present when
3783 	 * "frame list" FD is used.
3784 	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3785 	 * in_fle zeroized (except for "Final" flag) is the best option.
3786 	 */
3787 	if (buflen) {
3788 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3789 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3790 		dpaa2_fl_set_len(in_fle, buflen);
3791 	}
3792 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3793 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3794 	dpaa2_fl_set_len(out_fle, digestsize);
3795 
3796 	req_ctx->flc = &ctx->flc[DIGEST];
3797 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3798 	req_ctx->cbk = ahash_done;
3799 	req_ctx->ctx = &req->base;
3800 	req_ctx->edesc = edesc;
3801 
3802 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3803 	if (ret == -EINPROGRESS ||
3804 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3805 		return ret;
3806 
3807 unmap:
3808 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3809 	qi_cache_free(edesc);
3810 	return ret;
3811 }
3812 
3813 static int ahash_update_no_ctx(struct ahash_request *req)
3814 {
3815 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3816 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3817 	struct caam_hash_state *state = ahash_request_ctx(req);
3818 	struct caam_request *req_ctx = &state->caam_req;
3819 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3820 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3821 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3822 		      GFP_KERNEL : GFP_ATOMIC;
3823 	u8 *buf = current_buf(state);
3824 	int *buflen = current_buflen(state);
3825 	u8 *next_buf = alt_buf(state);
3826 	int *next_buflen = alt_buflen(state);
3827 	int in_len = *buflen + req->nbytes, to_hash;
3828 	int qm_sg_bytes, src_nents, mapped_nents;
3829 	struct ahash_edesc *edesc;
3830 	int ret = 0;
3831 
3832 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3833 	to_hash = in_len - *next_buflen;
3834 
3835 	if (to_hash) {
3836 		struct dpaa2_sg_entry *sg_table;
3837 
3838 		src_nents = sg_nents_for_len(req->src,
3839 					     req->nbytes - *next_buflen);
3840 		if (src_nents < 0) {
3841 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3842 			return src_nents;
3843 		}
3844 
3845 		if (src_nents) {
3846 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3847 						  DMA_TO_DEVICE);
3848 			if (!mapped_nents) {
3849 				dev_err(ctx->dev, "unable to DMA map source\n");
3850 				return -ENOMEM;
3851 			}
3852 		} else {
3853 			mapped_nents = 0;
3854 		}
3855 
3856 		/* allocate space for base edesc and link tables */
3857 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3858 		if (!edesc) {
3859 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3860 				     DMA_TO_DEVICE);
3861 			return -ENOMEM;
3862 		}
3863 
3864 		edesc->src_nents = src_nents;
3865 		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3866 		sg_table = &edesc->sgt[0];
3867 
3868 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3869 		if (ret)
3870 			goto unmap_ctx;
3871 
3872 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3873 
3874 		if (*next_buflen)
3875 			scatterwalk_map_and_copy(next_buf, req->src,
3876 						 to_hash - *buflen,
3877 						 *next_buflen, 0);
3878 
3879 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3880 						  qm_sg_bytes, DMA_TO_DEVICE);
3881 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3882 			dev_err(ctx->dev, "unable to map S/G table\n");
3883 			ret = -ENOMEM;
3884 			goto unmap_ctx;
3885 		}
3886 		edesc->qm_sg_bytes = qm_sg_bytes;
3887 
3888 		state->ctx_dma_len = ctx->ctx_len;
3889 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3890 						ctx->ctx_len, DMA_FROM_DEVICE);
3891 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3892 			dev_err(ctx->dev, "unable to map ctx\n");
3893 			state->ctx_dma = 0;
3894 			ret = -ENOMEM;
3895 			goto unmap_ctx;
3896 		}
3897 
3898 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3899 		dpaa2_fl_set_final(in_fle, true);
3900 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3901 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3902 		dpaa2_fl_set_len(in_fle, to_hash);
3903 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3904 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3905 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3906 
3907 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3908 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3909 		req_ctx->cbk = ahash_done_ctx_dst;
3910 		req_ctx->ctx = &req->base;
3911 		req_ctx->edesc = edesc;
3912 
3913 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3914 		if (ret != -EINPROGRESS &&
3915 		    !(ret == -EBUSY &&
3916 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3917 			goto unmap_ctx;
3918 
3919 		state->update = ahash_update_ctx;
3920 		state->finup = ahash_finup_ctx;
3921 		state->final = ahash_final_ctx;
3922 	} else if (*next_buflen) {
3923 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3924 					 req->nbytes, 0);
3925 		*buflen = *next_buflen;
3926 		*next_buflen = 0;
3927 	}
3928 
3929 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3930 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3931 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3932 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3933 			     1);
3934 
3935 	return ret;
3936 unmap_ctx:
3937 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3938 	qi_cache_free(edesc);
3939 	return ret;
3940 }
3941 
3942 static int ahash_finup_no_ctx(struct ahash_request *req)
3943 {
3944 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3945 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3946 	struct caam_hash_state *state = ahash_request_ctx(req);
3947 	struct caam_request *req_ctx = &state->caam_req;
3948 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3949 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3950 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3951 		      GFP_KERNEL : GFP_ATOMIC;
3952 	int buflen = *current_buflen(state);
3953 	int qm_sg_bytes, src_nents, mapped_nents;
3954 	int digestsize = crypto_ahash_digestsize(ahash);
3955 	struct ahash_edesc *edesc;
3956 	struct dpaa2_sg_entry *sg_table;
3957 	int ret;
3958 
3959 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3960 	if (src_nents < 0) {
3961 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3962 		return src_nents;
3963 	}
3964 
3965 	if (src_nents) {
3966 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3967 					  DMA_TO_DEVICE);
3968 		if (!mapped_nents) {
3969 			dev_err(ctx->dev, "unable to DMA map source\n");
3970 			return -ENOMEM;
3971 		}
3972 	} else {
3973 		mapped_nents = 0;
3974 	}
3975 
3976 	/* allocate space for base edesc and link tables */
3977 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3978 	if (!edesc) {
3979 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3980 		return -ENOMEM;
3981 	}
3982 
3983 	edesc->src_nents = src_nents;
3984 	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3985 	sg_table = &edesc->sgt[0];
3986 
3987 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3988 	if (ret)
3989 		goto unmap;
3990 
3991 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3992 
3993 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3994 					  DMA_TO_DEVICE);
3995 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3996 		dev_err(ctx->dev, "unable to map S/G table\n");
3997 		ret = -ENOMEM;
3998 		goto unmap;
3999 	}
4000 	edesc->qm_sg_bytes = qm_sg_bytes;
4001 
4002 	state->ctx_dma_len = digestsize;
4003 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4004 					DMA_FROM_DEVICE);
4005 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4006 		dev_err(ctx->dev, "unable to map ctx\n");
4007 		state->ctx_dma = 0;
4008 		ret = -ENOMEM;
4009 		goto unmap;
4010 	}
4011 
4012 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4013 	dpaa2_fl_set_final(in_fle, true);
4014 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4015 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4016 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4017 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4018 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4019 	dpaa2_fl_set_len(out_fle, digestsize);
4020 
4021 	req_ctx->flc = &ctx->flc[DIGEST];
4022 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4023 	req_ctx->cbk = ahash_done;
4024 	req_ctx->ctx = &req->base;
4025 	req_ctx->edesc = edesc;
4026 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4027 	if (ret != -EINPROGRESS &&
4028 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4029 		goto unmap;
4030 
4031 	return ret;
4032 unmap:
4033 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4034 	qi_cache_free(edesc);
4035 	return -ENOMEM;
4036 }
4037 
4038 static int ahash_update_first(struct ahash_request *req)
4039 {
4040 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4041 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4042 	struct caam_hash_state *state = ahash_request_ctx(req);
4043 	struct caam_request *req_ctx = &state->caam_req;
4044 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4045 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4046 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4047 		      GFP_KERNEL : GFP_ATOMIC;
4048 	u8 *next_buf = alt_buf(state);
4049 	int *next_buflen = alt_buflen(state);
4050 	int to_hash;
4051 	int src_nents, mapped_nents;
4052 	struct ahash_edesc *edesc;
4053 	int ret = 0;
4054 
4055 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4056 				      1);
4057 	to_hash = req->nbytes - *next_buflen;
4058 
4059 	if (to_hash) {
4060 		struct dpaa2_sg_entry *sg_table;
4061 
4062 		src_nents = sg_nents_for_len(req->src,
4063 					     req->nbytes - (*next_buflen));
4064 		if (src_nents < 0) {
4065 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4066 			return src_nents;
4067 		}
4068 
4069 		if (src_nents) {
4070 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4071 						  DMA_TO_DEVICE);
4072 			if (!mapped_nents) {
4073 				dev_err(ctx->dev, "unable to map source for DMA\n");
4074 				return -ENOMEM;
4075 			}
4076 		} else {
4077 			mapped_nents = 0;
4078 		}
4079 
4080 		/* allocate space for base edesc and link tables */
4081 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4082 		if (!edesc) {
4083 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4084 				     DMA_TO_DEVICE);
4085 			return -ENOMEM;
4086 		}
4087 
4088 		edesc->src_nents = src_nents;
4089 		sg_table = &edesc->sgt[0];
4090 
4091 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4092 		dpaa2_fl_set_final(in_fle, true);
4093 		dpaa2_fl_set_len(in_fle, to_hash);
4094 
4095 		if (mapped_nents > 1) {
4096 			int qm_sg_bytes;
4097 
4098 			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
4099 			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
4100 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4101 							  qm_sg_bytes,
4102 							  DMA_TO_DEVICE);
4103 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4104 				dev_err(ctx->dev, "unable to map S/G table\n");
4105 				ret = -ENOMEM;
4106 				goto unmap_ctx;
4107 			}
4108 			edesc->qm_sg_bytes = qm_sg_bytes;
4109 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4110 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4111 		} else {
4112 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4113 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4114 		}
4115 
4116 		if (*next_buflen)
4117 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4118 						 *next_buflen, 0);
4119 
4120 		state->ctx_dma_len = ctx->ctx_len;
4121 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4122 						ctx->ctx_len, DMA_FROM_DEVICE);
4123 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4124 			dev_err(ctx->dev, "unable to map ctx\n");
4125 			state->ctx_dma = 0;
4126 			ret = -ENOMEM;
4127 			goto unmap_ctx;
4128 		}
4129 
4130 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4131 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4132 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4133 
4134 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4135 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4136 		req_ctx->cbk = ahash_done_ctx_dst;
4137 		req_ctx->ctx = &req->base;
4138 		req_ctx->edesc = edesc;
4139 
4140 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4141 		if (ret != -EINPROGRESS &&
4142 		    !(ret == -EBUSY && req->base.flags &
4143 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4144 			goto unmap_ctx;
4145 
4146 		state->update = ahash_update_ctx;
4147 		state->finup = ahash_finup_ctx;
4148 		state->final = ahash_final_ctx;
4149 	} else if (*next_buflen) {
4150 		state->update = ahash_update_no_ctx;
4151 		state->finup = ahash_finup_no_ctx;
4152 		state->final = ahash_final_no_ctx;
4153 		scatterwalk_map_and_copy(next_buf, req->src, 0,
4154 					 req->nbytes, 0);
4155 		switch_buf(state);
4156 	}
4157 
4158 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4159 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4160 			     1);
4161 
4162 	return ret;
4163 unmap_ctx:
4164 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4165 	qi_cache_free(edesc);
4166 	return ret;
4167 }
4168 
4169 static int ahash_finup_first(struct ahash_request *req)
4170 {
4171 	return ahash_digest(req);
4172 }
4173 
4174 static int ahash_init(struct ahash_request *req)
4175 {
4176 	struct caam_hash_state *state = ahash_request_ctx(req);
4177 
4178 	state->update = ahash_update_first;
4179 	state->finup = ahash_finup_first;
4180 	state->final = ahash_final_no_ctx;
4181 
4182 	state->ctx_dma = 0;
4183 	state->ctx_dma_len = 0;
4184 	state->current_buf = 0;
4185 	state->buf_dma = 0;
4186 	state->buflen_0 = 0;
4187 	state->buflen_1 = 0;
4188 
4189 	return 0;
4190 }
4191 
4192 static int ahash_update(struct ahash_request *req)
4193 {
4194 	struct caam_hash_state *state = ahash_request_ctx(req);
4195 
4196 	return state->update(req);
4197 }
4198 
4199 static int ahash_finup(struct ahash_request *req)
4200 {
4201 	struct caam_hash_state *state = ahash_request_ctx(req);
4202 
4203 	return state->finup(req);
4204 }
4205 
4206 static int ahash_final(struct ahash_request *req)
4207 {
4208 	struct caam_hash_state *state = ahash_request_ctx(req);
4209 
4210 	return state->final(req);
4211 }
4212 
4213 static int ahash_export(struct ahash_request *req, void *out)
4214 {
4215 	struct caam_hash_state *state = ahash_request_ctx(req);
4216 	struct caam_export_state *export = out;
4217 	int len;
4218 	u8 *buf;
4219 
4220 	if (state->current_buf) {
4221 		buf = state->buf_1;
4222 		len = state->buflen_1;
4223 	} else {
4224 		buf = state->buf_0;
4225 		len = state->buflen_0;
4226 	}
4227 
4228 	memcpy(export->buf, buf, len);
4229 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4230 	export->buflen = len;
4231 	export->update = state->update;
4232 	export->final = state->final;
4233 	export->finup = state->finup;
4234 
4235 	return 0;
4236 }
4237 
4238 static int ahash_import(struct ahash_request *req, const void *in)
4239 {
4240 	struct caam_hash_state *state = ahash_request_ctx(req);
4241 	const struct caam_export_state *export = in;
4242 
4243 	memset(state, 0, sizeof(*state));
4244 	memcpy(state->buf_0, export->buf, export->buflen);
4245 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4246 	state->buflen_0 = export->buflen;
4247 	state->update = export->update;
4248 	state->final = export->final;
4249 	state->finup = export->finup;
4250 
4251 	return 0;
4252 }
4253 
4254 struct caam_hash_template {
4255 	char name[CRYPTO_MAX_ALG_NAME];
4256 	char driver_name[CRYPTO_MAX_ALG_NAME];
4257 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4258 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4259 	unsigned int blocksize;
4260 	struct ahash_alg template_ahash;
4261 	u32 alg_type;
4262 };
4263 
4264 /* ahash descriptors */
4265 static struct caam_hash_template driver_hash[] = {
4266 	{
4267 		.name = "sha1",
4268 		.driver_name = "sha1-caam-qi2",
4269 		.hmac_name = "hmac(sha1)",
4270 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4271 		.blocksize = SHA1_BLOCK_SIZE,
4272 		.template_ahash = {
4273 			.init = ahash_init,
4274 			.update = ahash_update,
4275 			.final = ahash_final,
4276 			.finup = ahash_finup,
4277 			.digest = ahash_digest,
4278 			.export = ahash_export,
4279 			.import = ahash_import,
4280 			.setkey = ahash_setkey,
4281 			.halg = {
4282 				.digestsize = SHA1_DIGEST_SIZE,
4283 				.statesize = sizeof(struct caam_export_state),
4284 			},
4285 		},
4286 		.alg_type = OP_ALG_ALGSEL_SHA1,
4287 	}, {
4288 		.name = "sha224",
4289 		.driver_name = "sha224-caam-qi2",
4290 		.hmac_name = "hmac(sha224)",
4291 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4292 		.blocksize = SHA224_BLOCK_SIZE,
4293 		.template_ahash = {
4294 			.init = ahash_init,
4295 			.update = ahash_update,
4296 			.final = ahash_final,
4297 			.finup = ahash_finup,
4298 			.digest = ahash_digest,
4299 			.export = ahash_export,
4300 			.import = ahash_import,
4301 			.setkey = ahash_setkey,
4302 			.halg = {
4303 				.digestsize = SHA224_DIGEST_SIZE,
4304 				.statesize = sizeof(struct caam_export_state),
4305 			},
4306 		},
4307 		.alg_type = OP_ALG_ALGSEL_SHA224,
4308 	}, {
4309 		.name = "sha256",
4310 		.driver_name = "sha256-caam-qi2",
4311 		.hmac_name = "hmac(sha256)",
4312 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4313 		.blocksize = SHA256_BLOCK_SIZE,
4314 		.template_ahash = {
4315 			.init = ahash_init,
4316 			.update = ahash_update,
4317 			.final = ahash_final,
4318 			.finup = ahash_finup,
4319 			.digest = ahash_digest,
4320 			.export = ahash_export,
4321 			.import = ahash_import,
4322 			.setkey = ahash_setkey,
4323 			.halg = {
4324 				.digestsize = SHA256_DIGEST_SIZE,
4325 				.statesize = sizeof(struct caam_export_state),
4326 			},
4327 		},
4328 		.alg_type = OP_ALG_ALGSEL_SHA256,
4329 	}, {
4330 		.name = "sha384",
4331 		.driver_name = "sha384-caam-qi2",
4332 		.hmac_name = "hmac(sha384)",
4333 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4334 		.blocksize = SHA384_BLOCK_SIZE,
4335 		.template_ahash = {
4336 			.init = ahash_init,
4337 			.update = ahash_update,
4338 			.final = ahash_final,
4339 			.finup = ahash_finup,
4340 			.digest = ahash_digest,
4341 			.export = ahash_export,
4342 			.import = ahash_import,
4343 			.setkey = ahash_setkey,
4344 			.halg = {
4345 				.digestsize = SHA384_DIGEST_SIZE,
4346 				.statesize = sizeof(struct caam_export_state),
4347 			},
4348 		},
4349 		.alg_type = OP_ALG_ALGSEL_SHA384,
4350 	}, {
4351 		.name = "sha512",
4352 		.driver_name = "sha512-caam-qi2",
4353 		.hmac_name = "hmac(sha512)",
4354 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4355 		.blocksize = SHA512_BLOCK_SIZE,
4356 		.template_ahash = {
4357 			.init = ahash_init,
4358 			.update = ahash_update,
4359 			.final = ahash_final,
4360 			.finup = ahash_finup,
4361 			.digest = ahash_digest,
4362 			.export = ahash_export,
4363 			.import = ahash_import,
4364 			.setkey = ahash_setkey,
4365 			.halg = {
4366 				.digestsize = SHA512_DIGEST_SIZE,
4367 				.statesize = sizeof(struct caam_export_state),
4368 			},
4369 		},
4370 		.alg_type = OP_ALG_ALGSEL_SHA512,
4371 	}, {
4372 		.name = "md5",
4373 		.driver_name = "md5-caam-qi2",
4374 		.hmac_name = "hmac(md5)",
4375 		.hmac_driver_name = "hmac-md5-caam-qi2",
4376 		.blocksize = MD5_BLOCK_WORDS * 4,
4377 		.template_ahash = {
4378 			.init = ahash_init,
4379 			.update = ahash_update,
4380 			.final = ahash_final,
4381 			.finup = ahash_finup,
4382 			.digest = ahash_digest,
4383 			.export = ahash_export,
4384 			.import = ahash_import,
4385 			.setkey = ahash_setkey,
4386 			.halg = {
4387 				.digestsize = MD5_DIGEST_SIZE,
4388 				.statesize = sizeof(struct caam_export_state),
4389 			},
4390 		},
4391 		.alg_type = OP_ALG_ALGSEL_MD5,
4392 	}
4393 };
4394 
4395 struct caam_hash_alg {
4396 	struct list_head entry;
4397 	struct device *dev;
4398 	int alg_type;
4399 	struct ahash_alg ahash_alg;
4400 };
4401 
4402 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4403 {
4404 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4405 	struct crypto_alg *base = tfm->__crt_alg;
4406 	struct hash_alg_common *halg =
4407 		 container_of(base, struct hash_alg_common, base);
4408 	struct ahash_alg *alg =
4409 		 container_of(halg, struct ahash_alg, halg);
4410 	struct caam_hash_alg *caam_hash =
4411 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4412 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4413 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4414 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4415 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4416 					 HASH_MSG_LEN + 32,
4417 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4418 					 HASH_MSG_LEN + 64,
4419 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4420 	dma_addr_t dma_addr;
4421 	int i;
4422 
4423 	ctx->dev = caam_hash->dev;
4424 
4425 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4426 					DMA_BIDIRECTIONAL,
4427 					DMA_ATTR_SKIP_CPU_SYNC);
4428 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4429 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4430 		return -ENOMEM;
4431 	}
4432 
4433 	for (i = 0; i < HASH_NUM_OP; i++)
4434 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4435 
4436 	/* copy descriptor header template value */
4437 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4438 
4439 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4440 				   OP_ALG_ALGSEL_SUBMASK) >>
4441 				  OP_ALG_ALGSEL_SHIFT];
4442 
4443 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4444 				 sizeof(struct caam_hash_state));
4445 
4446 	return ahash_set_sh_desc(ahash);
4447 }
4448 
4449 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4450 {
4451 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4452 
4453 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4454 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4455 }
4456 
4457 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4458 	struct caam_hash_template *template, bool keyed)
4459 {
4460 	struct caam_hash_alg *t_alg;
4461 	struct ahash_alg *halg;
4462 	struct crypto_alg *alg;
4463 
4464 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4465 	if (!t_alg)
4466 		return ERR_PTR(-ENOMEM);
4467 
4468 	t_alg->ahash_alg = template->template_ahash;
4469 	halg = &t_alg->ahash_alg;
4470 	alg = &halg->halg.base;
4471 
4472 	if (keyed) {
4473 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4474 			 template->hmac_name);
4475 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4476 			 template->hmac_driver_name);
4477 	} else {
4478 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4479 			 template->name);
4480 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4481 			 template->driver_name);
4482 		t_alg->ahash_alg.setkey = NULL;
4483 	}
4484 	alg->cra_module = THIS_MODULE;
4485 	alg->cra_init = caam_hash_cra_init;
4486 	alg->cra_exit = caam_hash_cra_exit;
4487 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4488 	alg->cra_priority = CAAM_CRA_PRIORITY;
4489 	alg->cra_blocksize = template->blocksize;
4490 	alg->cra_alignmask = 0;
4491 	alg->cra_flags = CRYPTO_ALG_ASYNC;
4492 
4493 	t_alg->alg_type = template->alg_type;
4494 	t_alg->dev = dev;
4495 
4496 	return t_alg;
4497 }
4498 
4499 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4500 {
4501 	struct dpaa2_caam_priv_per_cpu *ppriv;
4502 
4503 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4504 	napi_schedule_irqoff(&ppriv->napi);
4505 }
4506 
4507 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4508 {
4509 	struct device *dev = priv->dev;
4510 	struct dpaa2_io_notification_ctx *nctx;
4511 	struct dpaa2_caam_priv_per_cpu *ppriv;
4512 	int err, i = 0, cpu;
4513 
4514 	for_each_online_cpu(cpu) {
4515 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4516 		ppriv->priv = priv;
4517 		nctx = &ppriv->nctx;
4518 		nctx->is_cdan = 0;
4519 		nctx->id = ppriv->rsp_fqid;
4520 		nctx->desired_cpu = cpu;
4521 		nctx->cb = dpaa2_caam_fqdan_cb;
4522 
4523 		/* Register notification callbacks */
4524 		ppriv->dpio = dpaa2_io_service_select(cpu);
4525 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4526 		if (unlikely(err)) {
4527 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4528 			nctx->cb = NULL;
4529 			/*
4530 			 * If no affine DPIO for this core, there's probably
4531 			 * none available for next cores either. Signal we want
4532 			 * to retry later, in case the DPIO devices weren't
4533 			 * probed yet.
4534 			 */
4535 			err = -EPROBE_DEFER;
4536 			goto err;
4537 		}
4538 
4539 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4540 						     dev);
4541 		if (unlikely(!ppriv->store)) {
4542 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4543 			err = -ENOMEM;
4544 			goto err;
4545 		}
4546 
4547 		if (++i == priv->num_pairs)
4548 			break;
4549 	}
4550 
4551 	return 0;
4552 
4553 err:
4554 	for_each_online_cpu(cpu) {
4555 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4556 		if (!ppriv->nctx.cb)
4557 			break;
4558 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4559 	}
4560 
4561 	for_each_online_cpu(cpu) {
4562 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4563 		if (!ppriv->store)
4564 			break;
4565 		dpaa2_io_store_destroy(ppriv->store);
4566 	}
4567 
4568 	return err;
4569 }
4570 
4571 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4572 {
4573 	struct dpaa2_caam_priv_per_cpu *ppriv;
4574 	int i = 0, cpu;
4575 
4576 	for_each_online_cpu(cpu) {
4577 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4578 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4579 					    priv->dev);
4580 		dpaa2_io_store_destroy(ppriv->store);
4581 
4582 		if (++i == priv->num_pairs)
4583 			return;
4584 	}
4585 }
4586 
4587 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4588 {
4589 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4590 	struct device *dev = priv->dev;
4591 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4592 	struct dpaa2_caam_priv_per_cpu *ppriv;
4593 	int err = 0, i = 0, cpu;
4594 
4595 	/* Configure Rx queues */
4596 	for_each_online_cpu(cpu) {
4597 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4598 
4599 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4600 				       DPSECI_QUEUE_OPT_USER_CTX;
4601 		rx_queue_cfg.order_preservation_en = 0;
4602 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4603 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4604 		/*
4605 		 * Rx priority (WQ) doesn't really matter, since we use
4606 		 * pull mode, i.e. volatile dequeues from specific FQs
4607 		 */
4608 		rx_queue_cfg.dest_cfg.priority = 0;
4609 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4610 
4611 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4612 					  &rx_queue_cfg);
4613 		if (err) {
4614 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4615 				err);
4616 			return err;
4617 		}
4618 
4619 		if (++i == priv->num_pairs)
4620 			break;
4621 	}
4622 
4623 	return err;
4624 }
4625 
4626 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4627 {
4628 	struct device *dev = priv->dev;
4629 
4630 	if (!priv->cscn_mem)
4631 		return;
4632 
4633 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4634 	kfree(priv->cscn_mem);
4635 }
4636 
4637 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4638 {
4639 	struct device *dev = priv->dev;
4640 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4641 
4642 	dpaa2_dpseci_congestion_free(priv);
4643 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4644 }
4645 
4646 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4647 				  const struct dpaa2_fd *fd)
4648 {
4649 	struct caam_request *req;
4650 	u32 fd_err;
4651 
4652 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4653 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4654 		return;
4655 	}
4656 
4657 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4658 	if (unlikely(fd_err))
4659 		dev_err(priv->dev, "FD error: %08x\n", fd_err);
4660 
4661 	/*
4662 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4663 	 * in FD[ERR] or FD[FRC].
4664 	 */
4665 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4666 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4667 			 DMA_BIDIRECTIONAL);
4668 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4669 }
4670 
4671 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4672 {
4673 	int err;
4674 
4675 	/* Retry while portal is busy */
4676 	do {
4677 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4678 					       ppriv->store);
4679 	} while (err == -EBUSY);
4680 
4681 	if (unlikely(err))
4682 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4683 
4684 	return err;
4685 }
4686 
4687 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4688 {
4689 	struct dpaa2_dq *dq;
4690 	int cleaned = 0, is_last;
4691 
4692 	do {
4693 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4694 		if (unlikely(!dq)) {
4695 			if (unlikely(!is_last)) {
4696 				dev_dbg(ppriv->priv->dev,
4697 					"FQ %d returned no valid frames\n",
4698 					ppriv->rsp_fqid);
4699 				/*
4700 				 * MUST retry until we get some sort of
4701 				 * valid response token (be it "empty dequeue"
4702 				 * or a valid frame).
4703 				 */
4704 				continue;
4705 			}
4706 			break;
4707 		}
4708 
4709 		/* Process FD */
4710 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4711 		cleaned++;
4712 	} while (!is_last);
4713 
4714 	return cleaned;
4715 }
4716 
4717 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4718 {
4719 	struct dpaa2_caam_priv_per_cpu *ppriv;
4720 	struct dpaa2_caam_priv *priv;
4721 	int err, cleaned = 0, store_cleaned;
4722 
4723 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4724 	priv = ppriv->priv;
4725 
4726 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4727 		return 0;
4728 
4729 	do {
4730 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4731 		cleaned += store_cleaned;
4732 
4733 		if (store_cleaned == 0 ||
4734 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4735 			break;
4736 
4737 		/* Try to dequeue some more */
4738 		err = dpaa2_caam_pull_fq(ppriv);
4739 		if (unlikely(err))
4740 			break;
4741 	} while (1);
4742 
4743 	if (cleaned < budget) {
4744 		napi_complete_done(napi, cleaned);
4745 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4746 		if (unlikely(err))
4747 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4748 				err);
4749 	}
4750 
4751 	return cleaned;
4752 }
4753 
4754 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4755 					 u16 token)
4756 {
4757 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4758 	struct device *dev = priv->dev;
4759 	int err;
4760 
4761 	/*
4762 	 * Congestion group feature supported starting with DPSECI API v5.1
4763 	 * and only when object has been created with this capability.
4764 	 */
4765 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4766 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4767 		return 0;
4768 
4769 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4770 				 GFP_KERNEL | GFP_DMA);
4771 	if (!priv->cscn_mem)
4772 		return -ENOMEM;
4773 
4774 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4775 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4776 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4777 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4778 		dev_err(dev, "Error mapping CSCN memory area\n");
4779 		err = -ENOMEM;
4780 		goto err_dma_map;
4781 	}
4782 
4783 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4784 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4785 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4786 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4787 	cong_notif_cfg.message_iova = priv->cscn_dma;
4788 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4789 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4790 					DPSECI_CGN_MODE_COHERENT_WRITE;
4791 
4792 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4793 						 &cong_notif_cfg);
4794 	if (err) {
4795 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4796 		goto err_set_cong;
4797 	}
4798 
4799 	return 0;
4800 
4801 err_set_cong:
4802 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4803 err_dma_map:
4804 	kfree(priv->cscn_mem);
4805 
4806 	return err;
4807 }
4808 
4809 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4810 {
4811 	struct device *dev = &ls_dev->dev;
4812 	struct dpaa2_caam_priv *priv;
4813 	struct dpaa2_caam_priv_per_cpu *ppriv;
4814 	int err, cpu;
4815 	u8 i;
4816 
4817 	priv = dev_get_drvdata(dev);
4818 
4819 	priv->dev = dev;
4820 	priv->dpsec_id = ls_dev->obj_desc.id;
4821 
4822 	/* Get a handle for the DPSECI this interface is associate with */
4823 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4824 	if (err) {
4825 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4826 		goto err_open;
4827 	}
4828 
4829 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4830 				     &priv->minor_ver);
4831 	if (err) {
4832 		dev_err(dev, "dpseci_get_api_version() failed\n");
4833 		goto err_get_vers;
4834 	}
4835 
4836 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4837 
4838 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4839 				    &priv->dpseci_attr);
4840 	if (err) {
4841 		dev_err(dev, "dpseci_get_attributes() failed\n");
4842 		goto err_get_vers;
4843 	}
4844 
4845 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4846 				  &priv->sec_attr);
4847 	if (err) {
4848 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4849 		goto err_get_vers;
4850 	}
4851 
4852 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4853 	if (err) {
4854 		dev_err(dev, "setup_congestion() failed\n");
4855 		goto err_get_vers;
4856 	}
4857 
4858 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4859 			      priv->dpseci_attr.num_tx_queues);
4860 	if (priv->num_pairs > num_online_cpus()) {
4861 		dev_warn(dev, "%d queues won't be used\n",
4862 			 priv->num_pairs - num_online_cpus());
4863 		priv->num_pairs = num_online_cpus();
4864 	}
4865 
4866 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4867 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4868 					  &priv->rx_queue_attr[i]);
4869 		if (err) {
4870 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4871 			goto err_get_rx_queue;
4872 		}
4873 	}
4874 
4875 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4876 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4877 					  &priv->tx_queue_attr[i]);
4878 		if (err) {
4879 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4880 			goto err_get_rx_queue;
4881 		}
4882 	}
4883 
4884 	i = 0;
4885 	for_each_online_cpu(cpu) {
4886 		u8 j;
4887 
4888 		j = i % priv->num_pairs;
4889 
4890 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4891 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4892 
4893 		/*
4894 		 * Allow all cores to enqueue, while only some of them
4895 		 * will take part in dequeuing.
4896 		 */
4897 		if (++i > priv->num_pairs)
4898 			continue;
4899 
4900 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4901 		ppriv->prio = j;
4902 
4903 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4904 			priv->rx_queue_attr[j].fqid,
4905 			priv->tx_queue_attr[j].fqid);
4906 
4907 		ppriv->net_dev.dev = *dev;
4908 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4909 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4910 			       DPAA2_CAAM_NAPI_WEIGHT);
4911 	}
4912 
4913 	return 0;
4914 
4915 err_get_rx_queue:
4916 	dpaa2_dpseci_congestion_free(priv);
4917 err_get_vers:
4918 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4919 err_open:
4920 	return err;
4921 }
4922 
4923 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4924 {
4925 	struct device *dev = priv->dev;
4926 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4927 	struct dpaa2_caam_priv_per_cpu *ppriv;
4928 	int i;
4929 
4930 	for (i = 0; i < priv->num_pairs; i++) {
4931 		ppriv = per_cpu_ptr(priv->ppriv, i);
4932 		napi_enable(&ppriv->napi);
4933 	}
4934 
4935 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4936 }
4937 
4938 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4939 {
4940 	struct device *dev = priv->dev;
4941 	struct dpaa2_caam_priv_per_cpu *ppriv;
4942 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4943 	int i, err = 0, enabled;
4944 
4945 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4946 	if (err) {
4947 		dev_err(dev, "dpseci_disable() failed\n");
4948 		return err;
4949 	}
4950 
4951 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4952 	if (err) {
4953 		dev_err(dev, "dpseci_is_enabled() failed\n");
4954 		return err;
4955 	}
4956 
4957 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4958 
4959 	for (i = 0; i < priv->num_pairs; i++) {
4960 		ppriv = per_cpu_ptr(priv->ppriv, i);
4961 		napi_disable(&ppriv->napi);
4962 		netif_napi_del(&ppriv->napi);
4963 	}
4964 
4965 	return 0;
4966 }
4967 
4968 static struct list_head hash_list;
4969 
4970 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4971 {
4972 	struct device *dev;
4973 	struct dpaa2_caam_priv *priv;
4974 	int i, err = 0;
4975 	bool registered = false;
4976 
4977 	/*
4978 	 * There is no way to get CAAM endianness - there is no direct register
4979 	 * space access and MC f/w does not provide this attribute.
4980 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4981 	 * property.
4982 	 */
4983 	caam_little_end = true;
4984 
4985 	caam_imx = false;
4986 
4987 	dev = &dpseci_dev->dev;
4988 
4989 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4990 	if (!priv)
4991 		return -ENOMEM;
4992 
4993 	dev_set_drvdata(dev, priv);
4994 
4995 	priv->domain = iommu_get_domain_for_dev(dev);
4996 
4997 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4998 				     0, SLAB_CACHE_DMA, NULL);
4999 	if (!qi_cache) {
5000 		dev_err(dev, "Can't allocate SEC cache\n");
5001 		return -ENOMEM;
5002 	}
5003 
5004 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5005 	if (err) {
5006 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5007 		goto err_dma_mask;
5008 	}
5009 
5010 	/* Obtain a MC portal */
5011 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5012 	if (err) {
5013 		if (err == -ENXIO)
5014 			err = -EPROBE_DEFER;
5015 		else
5016 			dev_err(dev, "MC portal allocation failed\n");
5017 
5018 		goto err_dma_mask;
5019 	}
5020 
5021 	priv->ppriv = alloc_percpu(*priv->ppriv);
5022 	if (!priv->ppriv) {
5023 		dev_err(dev, "alloc_percpu() failed\n");
5024 		err = -ENOMEM;
5025 		goto err_alloc_ppriv;
5026 	}
5027 
5028 	/* DPSECI initialization */
5029 	err = dpaa2_dpseci_setup(dpseci_dev);
5030 	if (err) {
5031 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5032 		goto err_dpseci_setup;
5033 	}
5034 
5035 	/* DPIO */
5036 	err = dpaa2_dpseci_dpio_setup(priv);
5037 	if (err) {
5038 		if (err != -EPROBE_DEFER)
5039 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5040 		goto err_dpio_setup;
5041 	}
5042 
5043 	/* DPSECI binding to DPIO */
5044 	err = dpaa2_dpseci_bind(priv);
5045 	if (err) {
5046 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5047 		goto err_bind;
5048 	}
5049 
5050 	/* DPSECI enable */
5051 	err = dpaa2_dpseci_enable(priv);
5052 	if (err) {
5053 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5054 		goto err_bind;
5055 	}
5056 
5057 	/* register crypto algorithms the device supports */
5058 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5059 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5060 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5061 
5062 		/* Skip DES algorithms if not supported by device */
5063 		if (!priv->sec_attr.des_acc_num &&
5064 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5065 		     alg_sel == OP_ALG_ALGSEL_DES))
5066 			continue;
5067 
5068 		/* Skip AES algorithms if not supported by device */
5069 		if (!priv->sec_attr.aes_acc_num &&
5070 		    alg_sel == OP_ALG_ALGSEL_AES)
5071 			continue;
5072 
5073 		/* Skip CHACHA20 algorithms if not supported by device */
5074 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5075 		    !priv->sec_attr.ccha_acc_num)
5076 			continue;
5077 
5078 		t_alg->caam.dev = dev;
5079 		caam_skcipher_alg_init(t_alg);
5080 
5081 		err = crypto_register_skcipher(&t_alg->skcipher);
5082 		if (err) {
5083 			dev_warn(dev, "%s alg registration failed: %d\n",
5084 				 t_alg->skcipher.base.cra_driver_name, err);
5085 			continue;
5086 		}
5087 
5088 		t_alg->registered = true;
5089 		registered = true;
5090 	}
5091 
5092 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5093 		struct caam_aead_alg *t_alg = driver_aeads + i;
5094 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5095 				 OP_ALG_ALGSEL_MASK;
5096 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5097 				 OP_ALG_ALGSEL_MASK;
5098 
5099 		/* Skip DES algorithms if not supported by device */
5100 		if (!priv->sec_attr.des_acc_num &&
5101 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5102 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5103 			continue;
5104 
5105 		/* Skip AES algorithms if not supported by device */
5106 		if (!priv->sec_attr.aes_acc_num &&
5107 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5108 			continue;
5109 
5110 		/* Skip CHACHA20 algorithms if not supported by device */
5111 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5112 		    !priv->sec_attr.ccha_acc_num)
5113 			continue;
5114 
5115 		/* Skip POLY1305 algorithms if not supported by device */
5116 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5117 		    !priv->sec_attr.ptha_acc_num)
5118 			continue;
5119 
5120 		/*
5121 		 * Skip algorithms requiring message digests
5122 		 * if MD not supported by device.
5123 		 */
5124 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5125 		    !priv->sec_attr.md_acc_num)
5126 			continue;
5127 
5128 		t_alg->caam.dev = dev;
5129 		caam_aead_alg_init(t_alg);
5130 
5131 		err = crypto_register_aead(&t_alg->aead);
5132 		if (err) {
5133 			dev_warn(dev, "%s alg registration failed: %d\n",
5134 				 t_alg->aead.base.cra_driver_name, err);
5135 			continue;
5136 		}
5137 
5138 		t_alg->registered = true;
5139 		registered = true;
5140 	}
5141 	if (registered)
5142 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5143 
5144 	/* register hash algorithms the device supports */
5145 	INIT_LIST_HEAD(&hash_list);
5146 
5147 	/*
5148 	 * Skip registration of any hashing algorithms if MD block
5149 	 * is not present.
5150 	 */
5151 	if (!priv->sec_attr.md_acc_num)
5152 		return 0;
5153 
5154 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5155 		struct caam_hash_alg *t_alg;
5156 		struct caam_hash_template *alg = driver_hash + i;
5157 
5158 		/* register hmac version */
5159 		t_alg = caam_hash_alloc(dev, alg, true);
5160 		if (IS_ERR(t_alg)) {
5161 			err = PTR_ERR(t_alg);
5162 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5163 				 alg->driver_name, err);
5164 			continue;
5165 		}
5166 
5167 		err = crypto_register_ahash(&t_alg->ahash_alg);
5168 		if (err) {
5169 			dev_warn(dev, "%s alg registration failed: %d\n",
5170 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5171 				 err);
5172 			kfree(t_alg);
5173 		} else {
5174 			list_add_tail(&t_alg->entry, &hash_list);
5175 		}
5176 
5177 		/* register unkeyed version */
5178 		t_alg = caam_hash_alloc(dev, alg, false);
5179 		if (IS_ERR(t_alg)) {
5180 			err = PTR_ERR(t_alg);
5181 			dev_warn(dev, "%s alg allocation failed: %d\n",
5182 				 alg->driver_name, err);
5183 			continue;
5184 		}
5185 
5186 		err = crypto_register_ahash(&t_alg->ahash_alg);
5187 		if (err) {
5188 			dev_warn(dev, "%s alg registration failed: %d\n",
5189 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5190 				 err);
5191 			kfree(t_alg);
5192 		} else {
5193 			list_add_tail(&t_alg->entry, &hash_list);
5194 		}
5195 	}
5196 	if (!list_empty(&hash_list))
5197 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5198 
5199 	return err;
5200 
5201 err_bind:
5202 	dpaa2_dpseci_dpio_free(priv);
5203 err_dpio_setup:
5204 	dpaa2_dpseci_free(priv);
5205 err_dpseci_setup:
5206 	free_percpu(priv->ppriv);
5207 err_alloc_ppriv:
5208 	fsl_mc_portal_free(priv->mc_io);
5209 err_dma_mask:
5210 	kmem_cache_destroy(qi_cache);
5211 
5212 	return err;
5213 }
5214 
5215 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5216 {
5217 	struct device *dev;
5218 	struct dpaa2_caam_priv *priv;
5219 	int i;
5220 
5221 	dev = &ls_dev->dev;
5222 	priv = dev_get_drvdata(dev);
5223 
5224 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5225 		struct caam_aead_alg *t_alg = driver_aeads + i;
5226 
5227 		if (t_alg->registered)
5228 			crypto_unregister_aead(&t_alg->aead);
5229 	}
5230 
5231 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5232 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5233 
5234 		if (t_alg->registered)
5235 			crypto_unregister_skcipher(&t_alg->skcipher);
5236 	}
5237 
5238 	if (hash_list.next) {
5239 		struct caam_hash_alg *t_hash_alg, *p;
5240 
5241 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5242 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5243 			list_del(&t_hash_alg->entry);
5244 			kfree(t_hash_alg);
5245 		}
5246 	}
5247 
5248 	dpaa2_dpseci_disable(priv);
5249 	dpaa2_dpseci_dpio_free(priv);
5250 	dpaa2_dpseci_free(priv);
5251 	free_percpu(priv->ppriv);
5252 	fsl_mc_portal_free(priv->mc_io);
5253 	kmem_cache_destroy(qi_cache);
5254 
5255 	return 0;
5256 }
5257 
5258 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5259 {
5260 	struct dpaa2_fd fd;
5261 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5262 	struct dpaa2_caam_priv_per_cpu *ppriv;
5263 	int err = 0, i;
5264 
5265 	if (IS_ERR(req))
5266 		return PTR_ERR(req);
5267 
5268 	if (priv->cscn_mem) {
5269 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5270 					DPAA2_CSCN_SIZE,
5271 					DMA_FROM_DEVICE);
5272 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5273 			dev_dbg_ratelimited(dev, "Dropping request\n");
5274 			return -EBUSY;
5275 		}
5276 	}
5277 
5278 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5279 
5280 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5281 					 DMA_BIDIRECTIONAL);
5282 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5283 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5284 		goto err_out;
5285 	}
5286 
5287 	memset(&fd, 0, sizeof(fd));
5288 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5289 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5290 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5291 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5292 
5293 	ppriv = this_cpu_ptr(priv->ppriv);
5294 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5295 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5296 						  &fd);
5297 		if (err != -EBUSY)
5298 			break;
5299 
5300 		cpu_relax();
5301 	}
5302 
5303 	if (unlikely(err)) {
5304 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5305 		goto err_out;
5306 	}
5307 
5308 	return -EINPROGRESS;
5309 
5310 err_out:
5311 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5312 			 DMA_BIDIRECTIONAL);
5313 	return -EIO;
5314 }
5315 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5316 
5317 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5318 	{
5319 		.vendor = FSL_MC_VENDOR_FREESCALE,
5320 		.obj_type = "dpseci",
5321 	},
5322 	{ .vendor = 0x0 }
5323 };
5324 
5325 static struct fsl_mc_driver dpaa2_caam_driver = {
5326 	.driver = {
5327 		.name		= KBUILD_MODNAME,
5328 		.owner		= THIS_MODULE,
5329 	},
5330 	.probe		= dpaa2_caam_probe,
5331 	.remove		= dpaa2_caam_remove,
5332 	.match_id_table = dpaa2_caam_match_id_table
5333 };
5334 
5335 MODULE_LICENSE("Dual BSD/GPL");
5336 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5337 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5338 
5339 module_fsl_mc_driver(dpaa2_caam_driver);
5340