1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
22 
23 #define CAAM_CRA_PRIORITY	2000
24 
25 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
26 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
27 				 SHA512_DIGEST_SIZE * 2)
28 
29 /*
30  * This is a a cache of buffers, from which the users of CAAM QI driver
31  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
32  * NOTE: A more elegant solution would be to have some headroom in the frames
33  *       being processed. This can be added by the dpaa2-eth driver. This would
34  *       pose a problem for userspace application processing which cannot
35  *       know of this limitation. So for now, this will work.
36  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
37  */
38 static struct kmem_cache *qi_cache;
39 
40 struct caam_alg_entry {
41 	struct device *dev;
42 	int class1_alg_type;
43 	int class2_alg_type;
44 	bool rfc3686;
45 	bool geniv;
46 	bool nodkp;
47 };
48 
49 struct caam_aead_alg {
50 	struct aead_alg aead;
51 	struct caam_alg_entry caam;
52 	bool registered;
53 };
54 
55 struct caam_skcipher_alg {
56 	struct skcipher_alg skcipher;
57 	struct caam_alg_entry caam;
58 	bool registered;
59 };
60 
61 /**
62  * caam_ctx - per-session context
63  * @flc: Flow Contexts array
64  * @key:  [authentication key], encryption key
65  * @flc_dma: I/O virtual addresses of the Flow Contexts
66  * @key_dma: I/O virtual address of the key
67  * @dir: DMA direction for mapping key and Flow Contexts
68  * @dev: dpseci device
69  * @adata: authentication algorithm details
70  * @cdata: encryption algorithm details
71  * @authsize: authentication tag (a.k.a. ICV / MAC) size
72  */
73 struct caam_ctx {
74 	struct caam_flc flc[NUM_OP];
75 	u8 key[CAAM_MAX_KEY_SIZE];
76 	dma_addr_t flc_dma[NUM_OP];
77 	dma_addr_t key_dma;
78 	enum dma_data_direction dir;
79 	struct device *dev;
80 	struct alginfo adata;
81 	struct alginfo cdata;
82 	unsigned int authsize;
83 };
84 
85 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
86 				     dma_addr_t iova_addr)
87 {
88 	phys_addr_t phys_addr;
89 
90 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
91 				   iova_addr;
92 
93 	return phys_to_virt(phys_addr);
94 }
95 
96 /*
97  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
98  *
99  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
100  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
101  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
102  * hosting 16 SG entries.
103  *
104  * @flags - flags that would be used for the equivalent kmalloc(..) call
105  *
106  * Returns a pointer to a retrieved buffer on success or NULL on failure.
107  */
108 static inline void *qi_cache_zalloc(gfp_t flags)
109 {
110 	return kmem_cache_zalloc(qi_cache, flags);
111 }
112 
113 /*
114  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
115  *
116  * @obj - buffer previously allocated by qi_cache_zalloc
117  *
118  * No checking is being done, the call is a passthrough call to
119  * kmem_cache_free(...)
120  */
121 static inline void qi_cache_free(void *obj)
122 {
123 	kmem_cache_free(qi_cache, obj);
124 }
125 
126 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
127 {
128 	switch (crypto_tfm_alg_type(areq->tfm)) {
129 	case CRYPTO_ALG_TYPE_SKCIPHER:
130 		return skcipher_request_ctx(skcipher_request_cast(areq));
131 	case CRYPTO_ALG_TYPE_AEAD:
132 		return aead_request_ctx(container_of(areq, struct aead_request,
133 						     base));
134 	case CRYPTO_ALG_TYPE_AHASH:
135 		return ahash_request_ctx(ahash_request_cast(areq));
136 	default:
137 		return ERR_PTR(-EINVAL);
138 	}
139 }
140 
141 static void caam_unmap(struct device *dev, struct scatterlist *src,
142 		       struct scatterlist *dst, int src_nents,
143 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
144 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
145 		       int qm_sg_bytes)
146 {
147 	if (dst != src) {
148 		if (src_nents)
149 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
150 		if (dst_nents)
151 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
152 	} else {
153 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
154 	}
155 
156 	if (iv_dma)
157 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
158 
159 	if (qm_sg_bytes)
160 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
161 }
162 
163 static int aead_set_sh_desc(struct crypto_aead *aead)
164 {
165 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
166 						 typeof(*alg), aead);
167 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
168 	unsigned int ivsize = crypto_aead_ivsize(aead);
169 	struct device *dev = ctx->dev;
170 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
171 	struct caam_flc *flc;
172 	u32 *desc;
173 	u32 ctx1_iv_off = 0;
174 	u32 *nonce = NULL;
175 	unsigned int data_len[2];
176 	u32 inl_mask;
177 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
178 			       OP_ALG_AAI_CTR_MOD128);
179 	const bool is_rfc3686 = alg->caam.rfc3686;
180 
181 	if (!ctx->cdata.keylen || !ctx->authsize)
182 		return 0;
183 
184 	/*
185 	 * AES-CTR needs to load IV in CONTEXT1 reg
186 	 * at an offset of 128bits (16bytes)
187 	 * CONTEXT1[255:128] = IV
188 	 */
189 	if (ctr_mode)
190 		ctx1_iv_off = 16;
191 
192 	/*
193 	 * RFC3686 specific:
194 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
195 	 */
196 	if (is_rfc3686) {
197 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
198 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
199 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
200 	}
201 
202 	/*
203 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
204 	 * in invalid opcodes (last bytes of user key) in the resulting
205 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
206 	 * addresses are needed.
207 	 */
208 	ctx->adata.key_virt = ctx->key;
209 	ctx->adata.key_dma = ctx->key_dma;
210 
211 	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
212 	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
213 
214 	data_len[0] = ctx->adata.keylen_pad;
215 	data_len[1] = ctx->cdata.keylen;
216 
217 	/* aead_encrypt shared descriptor */
218 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
219 						 DESC_QI_AEAD_ENC_LEN) +
220 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
221 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
222 			      ARRAY_SIZE(data_len)) < 0)
223 		return -EINVAL;
224 
225 	ctx->adata.key_inline = !!(inl_mask & 1);
226 	ctx->cdata.key_inline = !!(inl_mask & 2);
227 
228 	flc = &ctx->flc[ENCRYPT];
229 	desc = flc->sh_desc;
230 
231 	if (alg->caam.geniv)
232 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
233 					  ivsize, ctx->authsize, is_rfc3686,
234 					  nonce, ctx1_iv_off, true,
235 					  priv->sec_attr.era);
236 	else
237 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
238 				       ivsize, ctx->authsize, is_rfc3686, nonce,
239 				       ctx1_iv_off, true, priv->sec_attr.era);
240 
241 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
242 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
243 				   sizeof(flc->flc) + desc_bytes(desc),
244 				   ctx->dir);
245 
246 	/* aead_decrypt shared descriptor */
247 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
248 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
249 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
250 			      ARRAY_SIZE(data_len)) < 0)
251 		return -EINVAL;
252 
253 	ctx->adata.key_inline = !!(inl_mask & 1);
254 	ctx->cdata.key_inline = !!(inl_mask & 2);
255 
256 	flc = &ctx->flc[DECRYPT];
257 	desc = flc->sh_desc;
258 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
259 			       ivsize, ctx->authsize, alg->caam.geniv,
260 			       is_rfc3686, nonce, ctx1_iv_off, true,
261 			       priv->sec_attr.era);
262 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
263 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
264 				   sizeof(flc->flc) + desc_bytes(desc),
265 				   ctx->dir);
266 
267 	return 0;
268 }
269 
270 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
271 {
272 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
273 
274 	ctx->authsize = authsize;
275 	aead_set_sh_desc(authenc);
276 
277 	return 0;
278 }
279 
280 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
281 		       unsigned int keylen)
282 {
283 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
284 	struct device *dev = ctx->dev;
285 	struct crypto_authenc_keys keys;
286 
287 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
288 		goto badkey;
289 
290 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
291 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
292 		keys.authkeylen);
293 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
294 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
295 
296 	ctx->adata.keylen = keys.authkeylen;
297 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
298 					      OP_ALG_ALGSEL_MASK);
299 
300 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
301 		goto badkey;
302 
303 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
304 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
305 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
306 				   keys.enckeylen, ctx->dir);
307 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
308 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
309 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
310 
311 	ctx->cdata.keylen = keys.enckeylen;
312 
313 	memzero_explicit(&keys, sizeof(keys));
314 	return aead_set_sh_desc(aead);
315 badkey:
316 	memzero_explicit(&keys, sizeof(keys));
317 	return -EINVAL;
318 }
319 
320 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
321 			    unsigned int keylen)
322 {
323 	struct crypto_authenc_keys keys;
324 	int err;
325 
326 	err = crypto_authenc_extractkeys(&keys, key, keylen);
327 	if (unlikely(err))
328 		goto out;
329 
330 	err = -EINVAL;
331 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
332 		goto out;
333 
334 	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
335 	      aead_setkey(aead, key, keylen);
336 
337 out:
338 	memzero_explicit(&keys, sizeof(keys));
339 	return err;
340 }
341 
342 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
343 					   bool encrypt)
344 {
345 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
346 	struct caam_request *req_ctx = aead_request_ctx(req);
347 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
348 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
349 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
350 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
351 						 typeof(*alg), aead);
352 	struct device *dev = ctx->dev;
353 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
354 		      GFP_KERNEL : GFP_ATOMIC;
355 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
356 	int src_len, dst_len = 0;
357 	struct aead_edesc *edesc;
358 	dma_addr_t qm_sg_dma, iv_dma = 0;
359 	int ivsize = 0;
360 	unsigned int authsize = ctx->authsize;
361 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
362 	int in_len, out_len;
363 	struct dpaa2_sg_entry *sg_table;
364 
365 	/* allocate space for base edesc, link tables and IV */
366 	edesc = qi_cache_zalloc(GFP_DMA | flags);
367 	if (unlikely(!edesc)) {
368 		dev_err(dev, "could not allocate extended descriptor\n");
369 		return ERR_PTR(-ENOMEM);
370 	}
371 
372 	if (unlikely(req->dst != req->src)) {
373 		src_len = req->assoclen + req->cryptlen;
374 		dst_len = src_len + (encrypt ? authsize : (-authsize));
375 
376 		src_nents = sg_nents_for_len(req->src, src_len);
377 		if (unlikely(src_nents < 0)) {
378 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
379 				src_len);
380 			qi_cache_free(edesc);
381 			return ERR_PTR(src_nents);
382 		}
383 
384 		dst_nents = sg_nents_for_len(req->dst, dst_len);
385 		if (unlikely(dst_nents < 0)) {
386 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
387 				dst_len);
388 			qi_cache_free(edesc);
389 			return ERR_PTR(dst_nents);
390 		}
391 
392 		if (src_nents) {
393 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
394 						      DMA_TO_DEVICE);
395 			if (unlikely(!mapped_src_nents)) {
396 				dev_err(dev, "unable to map source\n");
397 				qi_cache_free(edesc);
398 				return ERR_PTR(-ENOMEM);
399 			}
400 		} else {
401 			mapped_src_nents = 0;
402 		}
403 
404 		if (dst_nents) {
405 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
406 						      DMA_FROM_DEVICE);
407 			if (unlikely(!mapped_dst_nents)) {
408 				dev_err(dev, "unable to map destination\n");
409 				dma_unmap_sg(dev, req->src, src_nents,
410 					     DMA_TO_DEVICE);
411 				qi_cache_free(edesc);
412 				return ERR_PTR(-ENOMEM);
413 			}
414 		} else {
415 			mapped_dst_nents = 0;
416 		}
417 	} else {
418 		src_len = req->assoclen + req->cryptlen +
419 			  (encrypt ? authsize : 0);
420 
421 		src_nents = sg_nents_for_len(req->src, src_len);
422 		if (unlikely(src_nents < 0)) {
423 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
424 				src_len);
425 			qi_cache_free(edesc);
426 			return ERR_PTR(src_nents);
427 		}
428 
429 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
430 					      DMA_BIDIRECTIONAL);
431 		if (unlikely(!mapped_src_nents)) {
432 			dev_err(dev, "unable to map source\n");
433 			qi_cache_free(edesc);
434 			return ERR_PTR(-ENOMEM);
435 		}
436 	}
437 
438 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
439 		ivsize = crypto_aead_ivsize(aead);
440 
441 	/*
442 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
443 	 * Input is not contiguous.
444 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
445 	 * the end of the table by allocating more S/G entries. Logic:
446 	 * if (src != dst && output S/G)
447 	 *      pad output S/G, if needed
448 	 * else if (src == dst && S/G)
449 	 *      overlapping S/Gs; pad one of them
450 	 * else if (input S/G) ...
451 	 *      pad input S/G, if needed
452 	 */
453 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
454 	if (mapped_dst_nents > 1)
455 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
456 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
457 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
458 				  1 + !!ivsize +
459 				  pad_sg_nents(mapped_src_nents));
460 	else
461 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
462 
463 	sg_table = &edesc->sgt[0];
464 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
465 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
466 		     CAAM_QI_MEMCACHE_SIZE)) {
467 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
468 			qm_sg_nents, ivsize);
469 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
470 			   0, DMA_NONE, 0, 0);
471 		qi_cache_free(edesc);
472 		return ERR_PTR(-ENOMEM);
473 	}
474 
475 	if (ivsize) {
476 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
477 
478 		/* Make sure IV is located in a DMAable area */
479 		memcpy(iv, req->iv, ivsize);
480 
481 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
482 		if (dma_mapping_error(dev, iv_dma)) {
483 			dev_err(dev, "unable to map IV\n");
484 			caam_unmap(dev, req->src, req->dst, src_nents,
485 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
486 			qi_cache_free(edesc);
487 			return ERR_PTR(-ENOMEM);
488 		}
489 	}
490 
491 	edesc->src_nents = src_nents;
492 	edesc->dst_nents = dst_nents;
493 	edesc->iv_dma = iv_dma;
494 
495 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
496 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
497 		/*
498 		 * The associated data comes already with the IV but we need
499 		 * to skip it when we authenticate or encrypt...
500 		 */
501 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
502 	else
503 		edesc->assoclen = cpu_to_caam32(req->assoclen);
504 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
505 					     DMA_TO_DEVICE);
506 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
507 		dev_err(dev, "unable to map assoclen\n");
508 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
509 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
510 		qi_cache_free(edesc);
511 		return ERR_PTR(-ENOMEM);
512 	}
513 
514 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
515 	qm_sg_index++;
516 	if (ivsize) {
517 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
518 		qm_sg_index++;
519 	}
520 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
521 	qm_sg_index += mapped_src_nents;
522 
523 	if (mapped_dst_nents > 1)
524 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
525 
526 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
527 	if (dma_mapping_error(dev, qm_sg_dma)) {
528 		dev_err(dev, "unable to map S/G table\n");
529 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
530 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
531 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
532 		qi_cache_free(edesc);
533 		return ERR_PTR(-ENOMEM);
534 	}
535 
536 	edesc->qm_sg_dma = qm_sg_dma;
537 	edesc->qm_sg_bytes = qm_sg_bytes;
538 
539 	out_len = req->assoclen + req->cryptlen +
540 		  (encrypt ? ctx->authsize : (-ctx->authsize));
541 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
542 
543 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
544 	dpaa2_fl_set_final(in_fle, true);
545 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
546 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
547 	dpaa2_fl_set_len(in_fle, in_len);
548 
549 	if (req->dst == req->src) {
550 		if (mapped_src_nents == 1) {
551 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
552 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
553 		} else {
554 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
555 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
556 					  (1 + !!ivsize) * sizeof(*sg_table));
557 		}
558 	} else if (!mapped_dst_nents) {
559 		/*
560 		 * crypto engine requires the output entry to be present when
561 		 * "frame list" FD is used.
562 		 * Since engine does not support FMT=2'b11 (unused entry type),
563 		 * leaving out_fle zeroized is the best option.
564 		 */
565 		goto skip_out_fle;
566 	} else if (mapped_dst_nents == 1) {
567 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
568 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
569 	} else {
570 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
571 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
572 				  sizeof(*sg_table));
573 	}
574 
575 	dpaa2_fl_set_len(out_fle, out_len);
576 
577 skip_out_fle:
578 	return edesc;
579 }
580 
581 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
582 {
583 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
584 	unsigned int ivsize = crypto_aead_ivsize(aead);
585 	struct device *dev = ctx->dev;
586 	struct caam_flc *flc;
587 	u32 *desc;
588 
589 	if (!ctx->cdata.keylen || !ctx->authsize)
590 		return 0;
591 
592 	flc = &ctx->flc[ENCRYPT];
593 	desc = flc->sh_desc;
594 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
595 			       ctx->authsize, true, true);
596 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
597 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
598 				   sizeof(flc->flc) + desc_bytes(desc),
599 				   ctx->dir);
600 
601 	flc = &ctx->flc[DECRYPT];
602 	desc = flc->sh_desc;
603 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
604 			       ctx->authsize, false, true);
605 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
606 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
607 				   sizeof(flc->flc) + desc_bytes(desc),
608 				   ctx->dir);
609 
610 	return 0;
611 }
612 
613 static int chachapoly_setauthsize(struct crypto_aead *aead,
614 				  unsigned int authsize)
615 {
616 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
617 
618 	if (authsize != POLY1305_DIGEST_SIZE)
619 		return -EINVAL;
620 
621 	ctx->authsize = authsize;
622 	return chachapoly_set_sh_desc(aead);
623 }
624 
625 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
626 			     unsigned int keylen)
627 {
628 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
629 	unsigned int ivsize = crypto_aead_ivsize(aead);
630 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
631 
632 	if (keylen != CHACHA_KEY_SIZE + saltlen)
633 		return -EINVAL;
634 
635 	ctx->cdata.key_virt = key;
636 	ctx->cdata.keylen = keylen - saltlen;
637 
638 	return chachapoly_set_sh_desc(aead);
639 }
640 
641 static int gcm_set_sh_desc(struct crypto_aead *aead)
642 {
643 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
644 	struct device *dev = ctx->dev;
645 	unsigned int ivsize = crypto_aead_ivsize(aead);
646 	struct caam_flc *flc;
647 	u32 *desc;
648 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
649 			ctx->cdata.keylen;
650 
651 	if (!ctx->cdata.keylen || !ctx->authsize)
652 		return 0;
653 
654 	/*
655 	 * AES GCM encrypt shared descriptor
656 	 * Job Descriptor and Shared Descriptor
657 	 * must fit into the 64-word Descriptor h/w Buffer
658 	 */
659 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
660 		ctx->cdata.key_inline = true;
661 		ctx->cdata.key_virt = ctx->key;
662 	} else {
663 		ctx->cdata.key_inline = false;
664 		ctx->cdata.key_dma = ctx->key_dma;
665 	}
666 
667 	flc = &ctx->flc[ENCRYPT];
668 	desc = flc->sh_desc;
669 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
670 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
671 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
672 				   sizeof(flc->flc) + desc_bytes(desc),
673 				   ctx->dir);
674 
675 	/*
676 	 * Job Descriptor and Shared Descriptors
677 	 * must all fit into the 64-word Descriptor h/w Buffer
678 	 */
679 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
680 		ctx->cdata.key_inline = true;
681 		ctx->cdata.key_virt = ctx->key;
682 	} else {
683 		ctx->cdata.key_inline = false;
684 		ctx->cdata.key_dma = ctx->key_dma;
685 	}
686 
687 	flc = &ctx->flc[DECRYPT];
688 	desc = flc->sh_desc;
689 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
690 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
691 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
692 				   sizeof(flc->flc) + desc_bytes(desc),
693 				   ctx->dir);
694 
695 	return 0;
696 }
697 
698 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
699 {
700 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
701 	int err;
702 
703 	err = crypto_gcm_check_authsize(authsize);
704 	if (err)
705 		return err;
706 
707 	ctx->authsize = authsize;
708 	gcm_set_sh_desc(authenc);
709 
710 	return 0;
711 }
712 
713 static int gcm_setkey(struct crypto_aead *aead,
714 		      const u8 *key, unsigned int keylen)
715 {
716 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
717 	struct device *dev = ctx->dev;
718 	int ret;
719 
720 	ret = aes_check_keylen(keylen);
721 	if (ret)
722 		return ret;
723 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
724 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
725 
726 	memcpy(ctx->key, key, keylen);
727 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
728 	ctx->cdata.keylen = keylen;
729 
730 	return gcm_set_sh_desc(aead);
731 }
732 
733 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
734 {
735 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
736 	struct device *dev = ctx->dev;
737 	unsigned int ivsize = crypto_aead_ivsize(aead);
738 	struct caam_flc *flc;
739 	u32 *desc;
740 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
741 			ctx->cdata.keylen;
742 
743 	if (!ctx->cdata.keylen || !ctx->authsize)
744 		return 0;
745 
746 	ctx->cdata.key_virt = ctx->key;
747 
748 	/*
749 	 * RFC4106 encrypt shared descriptor
750 	 * Job Descriptor and Shared Descriptor
751 	 * must fit into the 64-word Descriptor h/w Buffer
752 	 */
753 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
754 		ctx->cdata.key_inline = true;
755 	} else {
756 		ctx->cdata.key_inline = false;
757 		ctx->cdata.key_dma = ctx->key_dma;
758 	}
759 
760 	flc = &ctx->flc[ENCRYPT];
761 	desc = flc->sh_desc;
762 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
763 				  true);
764 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
765 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
766 				   sizeof(flc->flc) + desc_bytes(desc),
767 				   ctx->dir);
768 
769 	/*
770 	 * Job Descriptor and Shared Descriptors
771 	 * must all fit into the 64-word Descriptor h/w Buffer
772 	 */
773 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
774 		ctx->cdata.key_inline = true;
775 	} else {
776 		ctx->cdata.key_inline = false;
777 		ctx->cdata.key_dma = ctx->key_dma;
778 	}
779 
780 	flc = &ctx->flc[DECRYPT];
781 	desc = flc->sh_desc;
782 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
783 				  true);
784 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
785 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
786 				   sizeof(flc->flc) + desc_bytes(desc),
787 				   ctx->dir);
788 
789 	return 0;
790 }
791 
792 static int rfc4106_setauthsize(struct crypto_aead *authenc,
793 			       unsigned int authsize)
794 {
795 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
796 	int err;
797 
798 	err = crypto_rfc4106_check_authsize(authsize);
799 	if (err)
800 		return err;
801 
802 	ctx->authsize = authsize;
803 	rfc4106_set_sh_desc(authenc);
804 
805 	return 0;
806 }
807 
808 static int rfc4106_setkey(struct crypto_aead *aead,
809 			  const u8 *key, unsigned int keylen)
810 {
811 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
812 	struct device *dev = ctx->dev;
813 	int ret;
814 
815 	ret = aes_check_keylen(keylen - 4);
816 	if (ret)
817 		return ret;
818 
819 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
820 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
821 
822 	memcpy(ctx->key, key, keylen);
823 	/*
824 	 * The last four bytes of the key material are used as the salt value
825 	 * in the nonce. Update the AES key length.
826 	 */
827 	ctx->cdata.keylen = keylen - 4;
828 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
829 				   ctx->dir);
830 
831 	return rfc4106_set_sh_desc(aead);
832 }
833 
834 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
835 {
836 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
837 	struct device *dev = ctx->dev;
838 	unsigned int ivsize = crypto_aead_ivsize(aead);
839 	struct caam_flc *flc;
840 	u32 *desc;
841 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
842 			ctx->cdata.keylen;
843 
844 	if (!ctx->cdata.keylen || !ctx->authsize)
845 		return 0;
846 
847 	ctx->cdata.key_virt = ctx->key;
848 
849 	/*
850 	 * RFC4543 encrypt shared descriptor
851 	 * Job Descriptor and Shared Descriptor
852 	 * must fit into the 64-word Descriptor h/w Buffer
853 	 */
854 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
855 		ctx->cdata.key_inline = true;
856 	} else {
857 		ctx->cdata.key_inline = false;
858 		ctx->cdata.key_dma = ctx->key_dma;
859 	}
860 
861 	flc = &ctx->flc[ENCRYPT];
862 	desc = flc->sh_desc;
863 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
864 				  true);
865 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
866 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
867 				   sizeof(flc->flc) + desc_bytes(desc),
868 				   ctx->dir);
869 
870 	/*
871 	 * Job Descriptor and Shared Descriptors
872 	 * must all fit into the 64-word Descriptor h/w Buffer
873 	 */
874 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
875 		ctx->cdata.key_inline = true;
876 	} else {
877 		ctx->cdata.key_inline = false;
878 		ctx->cdata.key_dma = ctx->key_dma;
879 	}
880 
881 	flc = &ctx->flc[DECRYPT];
882 	desc = flc->sh_desc;
883 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
884 				  true);
885 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
886 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
887 				   sizeof(flc->flc) + desc_bytes(desc),
888 				   ctx->dir);
889 
890 	return 0;
891 }
892 
893 static int rfc4543_setauthsize(struct crypto_aead *authenc,
894 			       unsigned int authsize)
895 {
896 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
897 
898 	if (authsize != 16)
899 		return -EINVAL;
900 
901 	ctx->authsize = authsize;
902 	rfc4543_set_sh_desc(authenc);
903 
904 	return 0;
905 }
906 
907 static int rfc4543_setkey(struct crypto_aead *aead,
908 			  const u8 *key, unsigned int keylen)
909 {
910 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
911 	struct device *dev = ctx->dev;
912 	int ret;
913 
914 	ret = aes_check_keylen(keylen - 4);
915 	if (ret)
916 		return ret;
917 
918 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
919 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
920 
921 	memcpy(ctx->key, key, keylen);
922 	/*
923 	 * The last four bytes of the key material are used as the salt value
924 	 * in the nonce. Update the AES key length.
925 	 */
926 	ctx->cdata.keylen = keylen - 4;
927 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
928 				   ctx->dir);
929 
930 	return rfc4543_set_sh_desc(aead);
931 }
932 
933 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
934 			   unsigned int keylen, const u32 ctx1_iv_off)
935 {
936 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
937 	struct caam_skcipher_alg *alg =
938 		container_of(crypto_skcipher_alg(skcipher),
939 			     struct caam_skcipher_alg, skcipher);
940 	struct device *dev = ctx->dev;
941 	struct caam_flc *flc;
942 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
943 	u32 *desc;
944 	const bool is_rfc3686 = alg->caam.rfc3686;
945 
946 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
947 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
948 
949 	ctx->cdata.keylen = keylen;
950 	ctx->cdata.key_virt = key;
951 	ctx->cdata.key_inline = true;
952 
953 	/* skcipher_encrypt shared descriptor */
954 	flc = &ctx->flc[ENCRYPT];
955 	desc = flc->sh_desc;
956 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
957 				   ctx1_iv_off);
958 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
959 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
960 				   sizeof(flc->flc) + desc_bytes(desc),
961 				   ctx->dir);
962 
963 	/* skcipher_decrypt shared descriptor */
964 	flc = &ctx->flc[DECRYPT];
965 	desc = flc->sh_desc;
966 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
967 				   ctx1_iv_off);
968 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
969 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
970 				   sizeof(flc->flc) + desc_bytes(desc),
971 				   ctx->dir);
972 
973 	return 0;
974 }
975 
976 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
977 			       const u8 *key, unsigned int keylen)
978 {
979 	int err;
980 
981 	err = aes_check_keylen(keylen);
982 	if (err)
983 		return err;
984 
985 	return skcipher_setkey(skcipher, key, keylen, 0);
986 }
987 
988 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
989 				   const u8 *key, unsigned int keylen)
990 {
991 	u32 ctx1_iv_off;
992 	int err;
993 
994 	/*
995 	 * RFC3686 specific:
996 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
997 	 *	| *key = {KEY, NONCE}
998 	 */
999 	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1000 	keylen -= CTR_RFC3686_NONCE_SIZE;
1001 
1002 	err = aes_check_keylen(keylen);
1003 	if (err)
1004 		return err;
1005 
1006 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1007 }
1008 
1009 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1010 			       const u8 *key, unsigned int keylen)
1011 {
1012 	u32 ctx1_iv_off;
1013 	int err;
1014 
1015 	/*
1016 	 * AES-CTR needs to load IV in CONTEXT1 reg
1017 	 * at an offset of 128bits (16bytes)
1018 	 * CONTEXT1[255:128] = IV
1019 	 */
1020 	ctx1_iv_off = 16;
1021 
1022 	err = aes_check_keylen(keylen);
1023 	if (err)
1024 		return err;
1025 
1026 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1027 }
1028 
1029 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1030 				    const u8 *key, unsigned int keylen)
1031 {
1032 	if (keylen != CHACHA_KEY_SIZE)
1033 		return -EINVAL;
1034 
1035 	return skcipher_setkey(skcipher, key, keylen, 0);
1036 }
1037 
1038 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1039 			       const u8 *key, unsigned int keylen)
1040 {
1041 	return verify_skcipher_des_key(skcipher, key) ?:
1042 	       skcipher_setkey(skcipher, key, keylen, 0);
1043 }
1044 
1045 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1046 			        const u8 *key, unsigned int keylen)
1047 {
1048 	return verify_skcipher_des3_key(skcipher, key) ?:
1049 	       skcipher_setkey(skcipher, key, keylen, 0);
1050 }
1051 
1052 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1053 			       unsigned int keylen)
1054 {
1055 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1056 	struct device *dev = ctx->dev;
1057 	struct caam_flc *flc;
1058 	u32 *desc;
1059 
1060 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
1061 		dev_dbg(dev, "key size mismatch\n");
1062 		return -EINVAL;
1063 	}
1064 
1065 	ctx->cdata.keylen = keylen;
1066 	ctx->cdata.key_virt = key;
1067 	ctx->cdata.key_inline = true;
1068 
1069 	/* xts_skcipher_encrypt shared descriptor */
1070 	flc = &ctx->flc[ENCRYPT];
1071 	desc = flc->sh_desc;
1072 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1073 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1074 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1075 				   sizeof(flc->flc) + desc_bytes(desc),
1076 				   ctx->dir);
1077 
1078 	/* xts_skcipher_decrypt shared descriptor */
1079 	flc = &ctx->flc[DECRYPT];
1080 	desc = flc->sh_desc;
1081 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1082 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1083 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1084 				   sizeof(flc->flc) + desc_bytes(desc),
1085 				   ctx->dir);
1086 
1087 	return 0;
1088 }
1089 
1090 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1091 {
1092 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1093 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1094 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1095 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1096 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1097 	struct device *dev = ctx->dev;
1098 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1099 		       GFP_KERNEL : GFP_ATOMIC;
1100 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1101 	struct skcipher_edesc *edesc;
1102 	dma_addr_t iv_dma;
1103 	u8 *iv;
1104 	int ivsize = crypto_skcipher_ivsize(skcipher);
1105 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1106 	struct dpaa2_sg_entry *sg_table;
1107 
1108 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1109 	if (unlikely(src_nents < 0)) {
1110 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1111 			req->cryptlen);
1112 		return ERR_PTR(src_nents);
1113 	}
1114 
1115 	if (unlikely(req->dst != req->src)) {
1116 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1117 		if (unlikely(dst_nents < 0)) {
1118 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1119 				req->cryptlen);
1120 			return ERR_PTR(dst_nents);
1121 		}
1122 
1123 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1124 					      DMA_TO_DEVICE);
1125 		if (unlikely(!mapped_src_nents)) {
1126 			dev_err(dev, "unable to map source\n");
1127 			return ERR_PTR(-ENOMEM);
1128 		}
1129 
1130 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1131 					      DMA_FROM_DEVICE);
1132 		if (unlikely(!mapped_dst_nents)) {
1133 			dev_err(dev, "unable to map destination\n");
1134 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1135 			return ERR_PTR(-ENOMEM);
1136 		}
1137 	} else {
1138 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1139 					      DMA_BIDIRECTIONAL);
1140 		if (unlikely(!mapped_src_nents)) {
1141 			dev_err(dev, "unable to map source\n");
1142 			return ERR_PTR(-ENOMEM);
1143 		}
1144 	}
1145 
1146 	qm_sg_ents = 1 + mapped_src_nents;
1147 	dst_sg_idx = qm_sg_ents;
1148 
1149 	/*
1150 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1151 	 * IV entries point to the same buffer
1152 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1153 	 *
1154 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1155 	 * the end of the table by allocating more S/G entries.
1156 	 */
1157 	if (req->src != req->dst)
1158 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1159 	else
1160 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1161 
1162 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1163 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1164 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1165 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1166 			qm_sg_ents, ivsize);
1167 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1168 			   0, DMA_NONE, 0, 0);
1169 		return ERR_PTR(-ENOMEM);
1170 	}
1171 
1172 	/* allocate space for base edesc, link tables and IV */
1173 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1174 	if (unlikely(!edesc)) {
1175 		dev_err(dev, "could not allocate extended descriptor\n");
1176 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1177 			   0, DMA_NONE, 0, 0);
1178 		return ERR_PTR(-ENOMEM);
1179 	}
1180 
1181 	/* Make sure IV is located in a DMAable area */
1182 	sg_table = &edesc->sgt[0];
1183 	iv = (u8 *)(sg_table + qm_sg_ents);
1184 	memcpy(iv, req->iv, ivsize);
1185 
1186 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1187 	if (dma_mapping_error(dev, iv_dma)) {
1188 		dev_err(dev, "unable to map IV\n");
1189 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1190 			   0, DMA_NONE, 0, 0);
1191 		qi_cache_free(edesc);
1192 		return ERR_PTR(-ENOMEM);
1193 	}
1194 
1195 	edesc->src_nents = src_nents;
1196 	edesc->dst_nents = dst_nents;
1197 	edesc->iv_dma = iv_dma;
1198 	edesc->qm_sg_bytes = qm_sg_bytes;
1199 
1200 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1201 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1202 
1203 	if (req->src != req->dst)
1204 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1205 
1206 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1207 			 ivsize, 0);
1208 
1209 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1210 					  DMA_TO_DEVICE);
1211 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1212 		dev_err(dev, "unable to map S/G table\n");
1213 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1214 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1215 		qi_cache_free(edesc);
1216 		return ERR_PTR(-ENOMEM);
1217 	}
1218 
1219 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1220 	dpaa2_fl_set_final(in_fle, true);
1221 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1222 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1223 
1224 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1225 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1226 
1227 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1228 
1229 	if (req->src == req->dst)
1230 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1231 				  sizeof(*sg_table));
1232 	else
1233 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1234 				  sizeof(*sg_table));
1235 
1236 	return edesc;
1237 }
1238 
1239 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1240 		       struct aead_request *req)
1241 {
1242 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1243 	int ivsize = crypto_aead_ivsize(aead);
1244 
1245 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1246 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1247 		   edesc->qm_sg_bytes);
1248 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1249 }
1250 
1251 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1252 			   struct skcipher_request *req)
1253 {
1254 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1255 	int ivsize = crypto_skcipher_ivsize(skcipher);
1256 
1257 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1258 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1259 		   edesc->qm_sg_bytes);
1260 }
1261 
1262 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1263 {
1264 	struct crypto_async_request *areq = cbk_ctx;
1265 	struct aead_request *req = container_of(areq, struct aead_request,
1266 						base);
1267 	struct caam_request *req_ctx = to_caam_req(areq);
1268 	struct aead_edesc *edesc = req_ctx->edesc;
1269 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1270 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1271 	int ecode = 0;
1272 
1273 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1274 
1275 	if (unlikely(status))
1276 		ecode = caam_qi2_strstatus(ctx->dev, status);
1277 
1278 	aead_unmap(ctx->dev, edesc, req);
1279 	qi_cache_free(edesc);
1280 	aead_request_complete(req, ecode);
1281 }
1282 
1283 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1284 {
1285 	struct crypto_async_request *areq = cbk_ctx;
1286 	struct aead_request *req = container_of(areq, struct aead_request,
1287 						base);
1288 	struct caam_request *req_ctx = to_caam_req(areq);
1289 	struct aead_edesc *edesc = req_ctx->edesc;
1290 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1291 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1292 	int ecode = 0;
1293 
1294 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1295 
1296 	if (unlikely(status))
1297 		ecode = caam_qi2_strstatus(ctx->dev, status);
1298 
1299 	aead_unmap(ctx->dev, edesc, req);
1300 	qi_cache_free(edesc);
1301 	aead_request_complete(req, ecode);
1302 }
1303 
1304 static int aead_encrypt(struct aead_request *req)
1305 {
1306 	struct aead_edesc *edesc;
1307 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1308 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1309 	struct caam_request *caam_req = aead_request_ctx(req);
1310 	int ret;
1311 
1312 	/* allocate extended descriptor */
1313 	edesc = aead_edesc_alloc(req, true);
1314 	if (IS_ERR(edesc))
1315 		return PTR_ERR(edesc);
1316 
1317 	caam_req->flc = &ctx->flc[ENCRYPT];
1318 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1319 	caam_req->cbk = aead_encrypt_done;
1320 	caam_req->ctx = &req->base;
1321 	caam_req->edesc = edesc;
1322 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1323 	if (ret != -EINPROGRESS &&
1324 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1325 		aead_unmap(ctx->dev, edesc, req);
1326 		qi_cache_free(edesc);
1327 	}
1328 
1329 	return ret;
1330 }
1331 
1332 static int aead_decrypt(struct aead_request *req)
1333 {
1334 	struct aead_edesc *edesc;
1335 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1336 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1337 	struct caam_request *caam_req = aead_request_ctx(req);
1338 	int ret;
1339 
1340 	/* allocate extended descriptor */
1341 	edesc = aead_edesc_alloc(req, false);
1342 	if (IS_ERR(edesc))
1343 		return PTR_ERR(edesc);
1344 
1345 	caam_req->flc = &ctx->flc[DECRYPT];
1346 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1347 	caam_req->cbk = aead_decrypt_done;
1348 	caam_req->ctx = &req->base;
1349 	caam_req->edesc = edesc;
1350 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1351 	if (ret != -EINPROGRESS &&
1352 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1353 		aead_unmap(ctx->dev, edesc, req);
1354 		qi_cache_free(edesc);
1355 	}
1356 
1357 	return ret;
1358 }
1359 
1360 static int ipsec_gcm_encrypt(struct aead_request *req)
1361 {
1362 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1363 }
1364 
1365 static int ipsec_gcm_decrypt(struct aead_request *req)
1366 {
1367 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1368 }
1369 
1370 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1371 {
1372 	struct crypto_async_request *areq = cbk_ctx;
1373 	struct skcipher_request *req = skcipher_request_cast(areq);
1374 	struct caam_request *req_ctx = to_caam_req(areq);
1375 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1376 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1377 	struct skcipher_edesc *edesc = req_ctx->edesc;
1378 	int ecode = 0;
1379 	int ivsize = crypto_skcipher_ivsize(skcipher);
1380 
1381 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1382 
1383 	if (unlikely(status))
1384 		ecode = caam_qi2_strstatus(ctx->dev, status);
1385 
1386 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1387 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1388 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1389 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1390 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1391 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1392 
1393 	skcipher_unmap(ctx->dev, edesc, req);
1394 
1395 	/*
1396 	 * The crypto API expects us to set the IV (req->iv) to the last
1397 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1398 	 * This is used e.g. by the CTS mode.
1399 	 */
1400 	if (!ecode)
1401 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1402 		       ivsize);
1403 
1404 	qi_cache_free(edesc);
1405 	skcipher_request_complete(req, ecode);
1406 }
1407 
1408 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1409 {
1410 	struct crypto_async_request *areq = cbk_ctx;
1411 	struct skcipher_request *req = skcipher_request_cast(areq);
1412 	struct caam_request *req_ctx = to_caam_req(areq);
1413 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1414 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1415 	struct skcipher_edesc *edesc = req_ctx->edesc;
1416 	int ecode = 0;
1417 	int ivsize = crypto_skcipher_ivsize(skcipher);
1418 
1419 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1420 
1421 	if (unlikely(status))
1422 		ecode = caam_qi2_strstatus(ctx->dev, status);
1423 
1424 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1425 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1426 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1427 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1428 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1429 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1430 
1431 	skcipher_unmap(ctx->dev, edesc, req);
1432 
1433 	/*
1434 	 * The crypto API expects us to set the IV (req->iv) to the last
1435 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1436 	 * This is used e.g. by the CTS mode.
1437 	 */
1438 	if (!ecode)
1439 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1440 		       ivsize);
1441 
1442 	qi_cache_free(edesc);
1443 	skcipher_request_complete(req, ecode);
1444 }
1445 
1446 static int skcipher_encrypt(struct skcipher_request *req)
1447 {
1448 	struct skcipher_edesc *edesc;
1449 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1450 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1451 	struct caam_request *caam_req = skcipher_request_ctx(req);
1452 	int ret;
1453 
1454 	if (!req->cryptlen)
1455 		return 0;
1456 
1457 	/* allocate extended descriptor */
1458 	edesc = skcipher_edesc_alloc(req);
1459 	if (IS_ERR(edesc))
1460 		return PTR_ERR(edesc);
1461 
1462 	caam_req->flc = &ctx->flc[ENCRYPT];
1463 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1464 	caam_req->cbk = skcipher_encrypt_done;
1465 	caam_req->ctx = &req->base;
1466 	caam_req->edesc = edesc;
1467 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1468 	if (ret != -EINPROGRESS &&
1469 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1470 		skcipher_unmap(ctx->dev, edesc, req);
1471 		qi_cache_free(edesc);
1472 	}
1473 
1474 	return ret;
1475 }
1476 
1477 static int skcipher_decrypt(struct skcipher_request *req)
1478 {
1479 	struct skcipher_edesc *edesc;
1480 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1481 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1482 	struct caam_request *caam_req = skcipher_request_ctx(req);
1483 	int ret;
1484 
1485 	if (!req->cryptlen)
1486 		return 0;
1487 	/* allocate extended descriptor */
1488 	edesc = skcipher_edesc_alloc(req);
1489 	if (IS_ERR(edesc))
1490 		return PTR_ERR(edesc);
1491 
1492 	caam_req->flc = &ctx->flc[DECRYPT];
1493 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1494 	caam_req->cbk = skcipher_decrypt_done;
1495 	caam_req->ctx = &req->base;
1496 	caam_req->edesc = edesc;
1497 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1498 	if (ret != -EINPROGRESS &&
1499 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1500 		skcipher_unmap(ctx->dev, edesc, req);
1501 		qi_cache_free(edesc);
1502 	}
1503 
1504 	return ret;
1505 }
1506 
1507 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1508 			 bool uses_dkp)
1509 {
1510 	dma_addr_t dma_addr;
1511 	int i;
1512 
1513 	/* copy descriptor header template value */
1514 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1515 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1516 
1517 	ctx->dev = caam->dev;
1518 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1519 
1520 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1521 					offsetof(struct caam_ctx, flc_dma),
1522 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1523 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1524 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1525 		return -ENOMEM;
1526 	}
1527 
1528 	for (i = 0; i < NUM_OP; i++)
1529 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1530 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1531 
1532 	return 0;
1533 }
1534 
1535 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1536 {
1537 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1538 	struct caam_skcipher_alg *caam_alg =
1539 		container_of(alg, typeof(*caam_alg), skcipher);
1540 
1541 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1542 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1543 }
1544 
1545 static int caam_cra_init_aead(struct crypto_aead *tfm)
1546 {
1547 	struct aead_alg *alg = crypto_aead_alg(tfm);
1548 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1549 						      aead);
1550 
1551 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1552 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1553 			     !caam_alg->caam.nodkp);
1554 }
1555 
1556 static void caam_exit_common(struct caam_ctx *ctx)
1557 {
1558 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1559 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1560 			       DMA_ATTR_SKIP_CPU_SYNC);
1561 }
1562 
1563 static void caam_cra_exit(struct crypto_skcipher *tfm)
1564 {
1565 	caam_exit_common(crypto_skcipher_ctx(tfm));
1566 }
1567 
1568 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1569 {
1570 	caam_exit_common(crypto_aead_ctx(tfm));
1571 }
1572 
1573 static struct caam_skcipher_alg driver_algs[] = {
1574 	{
1575 		.skcipher = {
1576 			.base = {
1577 				.cra_name = "cbc(aes)",
1578 				.cra_driver_name = "cbc-aes-caam-qi2",
1579 				.cra_blocksize = AES_BLOCK_SIZE,
1580 			},
1581 			.setkey = aes_skcipher_setkey,
1582 			.encrypt = skcipher_encrypt,
1583 			.decrypt = skcipher_decrypt,
1584 			.min_keysize = AES_MIN_KEY_SIZE,
1585 			.max_keysize = AES_MAX_KEY_SIZE,
1586 			.ivsize = AES_BLOCK_SIZE,
1587 		},
1588 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1589 	},
1590 	{
1591 		.skcipher = {
1592 			.base = {
1593 				.cra_name = "cbc(des3_ede)",
1594 				.cra_driver_name = "cbc-3des-caam-qi2",
1595 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1596 			},
1597 			.setkey = des3_skcipher_setkey,
1598 			.encrypt = skcipher_encrypt,
1599 			.decrypt = skcipher_decrypt,
1600 			.min_keysize = DES3_EDE_KEY_SIZE,
1601 			.max_keysize = DES3_EDE_KEY_SIZE,
1602 			.ivsize = DES3_EDE_BLOCK_SIZE,
1603 		},
1604 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1605 	},
1606 	{
1607 		.skcipher = {
1608 			.base = {
1609 				.cra_name = "cbc(des)",
1610 				.cra_driver_name = "cbc-des-caam-qi2",
1611 				.cra_blocksize = DES_BLOCK_SIZE,
1612 			},
1613 			.setkey = des_skcipher_setkey,
1614 			.encrypt = skcipher_encrypt,
1615 			.decrypt = skcipher_decrypt,
1616 			.min_keysize = DES_KEY_SIZE,
1617 			.max_keysize = DES_KEY_SIZE,
1618 			.ivsize = DES_BLOCK_SIZE,
1619 		},
1620 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1621 	},
1622 	{
1623 		.skcipher = {
1624 			.base = {
1625 				.cra_name = "ctr(aes)",
1626 				.cra_driver_name = "ctr-aes-caam-qi2",
1627 				.cra_blocksize = 1,
1628 			},
1629 			.setkey = ctr_skcipher_setkey,
1630 			.encrypt = skcipher_encrypt,
1631 			.decrypt = skcipher_decrypt,
1632 			.min_keysize = AES_MIN_KEY_SIZE,
1633 			.max_keysize = AES_MAX_KEY_SIZE,
1634 			.ivsize = AES_BLOCK_SIZE,
1635 			.chunksize = AES_BLOCK_SIZE,
1636 		},
1637 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1638 					OP_ALG_AAI_CTR_MOD128,
1639 	},
1640 	{
1641 		.skcipher = {
1642 			.base = {
1643 				.cra_name = "rfc3686(ctr(aes))",
1644 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1645 				.cra_blocksize = 1,
1646 			},
1647 			.setkey = rfc3686_skcipher_setkey,
1648 			.encrypt = skcipher_encrypt,
1649 			.decrypt = skcipher_decrypt,
1650 			.min_keysize = AES_MIN_KEY_SIZE +
1651 				       CTR_RFC3686_NONCE_SIZE,
1652 			.max_keysize = AES_MAX_KEY_SIZE +
1653 				       CTR_RFC3686_NONCE_SIZE,
1654 			.ivsize = CTR_RFC3686_IV_SIZE,
1655 			.chunksize = AES_BLOCK_SIZE,
1656 		},
1657 		.caam = {
1658 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1659 					   OP_ALG_AAI_CTR_MOD128,
1660 			.rfc3686 = true,
1661 		},
1662 	},
1663 	{
1664 		.skcipher = {
1665 			.base = {
1666 				.cra_name = "xts(aes)",
1667 				.cra_driver_name = "xts-aes-caam-qi2",
1668 				.cra_blocksize = AES_BLOCK_SIZE,
1669 			},
1670 			.setkey = xts_skcipher_setkey,
1671 			.encrypt = skcipher_encrypt,
1672 			.decrypt = skcipher_decrypt,
1673 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1674 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1675 			.ivsize = AES_BLOCK_SIZE,
1676 		},
1677 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1678 	},
1679 	{
1680 		.skcipher = {
1681 			.base = {
1682 				.cra_name = "chacha20",
1683 				.cra_driver_name = "chacha20-caam-qi2",
1684 				.cra_blocksize = 1,
1685 			},
1686 			.setkey = chacha20_skcipher_setkey,
1687 			.encrypt = skcipher_encrypt,
1688 			.decrypt = skcipher_decrypt,
1689 			.min_keysize = CHACHA_KEY_SIZE,
1690 			.max_keysize = CHACHA_KEY_SIZE,
1691 			.ivsize = CHACHA_IV_SIZE,
1692 		},
1693 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1694 	},
1695 };
1696 
1697 static struct caam_aead_alg driver_aeads[] = {
1698 	{
1699 		.aead = {
1700 			.base = {
1701 				.cra_name = "rfc4106(gcm(aes))",
1702 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1703 				.cra_blocksize = 1,
1704 			},
1705 			.setkey = rfc4106_setkey,
1706 			.setauthsize = rfc4106_setauthsize,
1707 			.encrypt = ipsec_gcm_encrypt,
1708 			.decrypt = ipsec_gcm_decrypt,
1709 			.ivsize = 8,
1710 			.maxauthsize = AES_BLOCK_SIZE,
1711 		},
1712 		.caam = {
1713 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1714 			.nodkp = true,
1715 		},
1716 	},
1717 	{
1718 		.aead = {
1719 			.base = {
1720 				.cra_name = "rfc4543(gcm(aes))",
1721 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1722 				.cra_blocksize = 1,
1723 			},
1724 			.setkey = rfc4543_setkey,
1725 			.setauthsize = rfc4543_setauthsize,
1726 			.encrypt = ipsec_gcm_encrypt,
1727 			.decrypt = ipsec_gcm_decrypt,
1728 			.ivsize = 8,
1729 			.maxauthsize = AES_BLOCK_SIZE,
1730 		},
1731 		.caam = {
1732 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1733 			.nodkp = true,
1734 		},
1735 	},
1736 	/* Galois Counter Mode */
1737 	{
1738 		.aead = {
1739 			.base = {
1740 				.cra_name = "gcm(aes)",
1741 				.cra_driver_name = "gcm-aes-caam-qi2",
1742 				.cra_blocksize = 1,
1743 			},
1744 			.setkey = gcm_setkey,
1745 			.setauthsize = gcm_setauthsize,
1746 			.encrypt = aead_encrypt,
1747 			.decrypt = aead_decrypt,
1748 			.ivsize = 12,
1749 			.maxauthsize = AES_BLOCK_SIZE,
1750 		},
1751 		.caam = {
1752 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1753 			.nodkp = true,
1754 		}
1755 	},
1756 	/* single-pass ipsec_esp descriptor */
1757 	{
1758 		.aead = {
1759 			.base = {
1760 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1761 				.cra_driver_name = "authenc-hmac-md5-"
1762 						   "cbc-aes-caam-qi2",
1763 				.cra_blocksize = AES_BLOCK_SIZE,
1764 			},
1765 			.setkey = aead_setkey,
1766 			.setauthsize = aead_setauthsize,
1767 			.encrypt = aead_encrypt,
1768 			.decrypt = aead_decrypt,
1769 			.ivsize = AES_BLOCK_SIZE,
1770 			.maxauthsize = MD5_DIGEST_SIZE,
1771 		},
1772 		.caam = {
1773 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1774 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1775 					   OP_ALG_AAI_HMAC_PRECOMP,
1776 		}
1777 	},
1778 	{
1779 		.aead = {
1780 			.base = {
1781 				.cra_name = "echainiv(authenc(hmac(md5),"
1782 					    "cbc(aes)))",
1783 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1784 						   "cbc-aes-caam-qi2",
1785 				.cra_blocksize = AES_BLOCK_SIZE,
1786 			},
1787 			.setkey = aead_setkey,
1788 			.setauthsize = aead_setauthsize,
1789 			.encrypt = aead_encrypt,
1790 			.decrypt = aead_decrypt,
1791 			.ivsize = AES_BLOCK_SIZE,
1792 			.maxauthsize = MD5_DIGEST_SIZE,
1793 		},
1794 		.caam = {
1795 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1796 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1797 					   OP_ALG_AAI_HMAC_PRECOMP,
1798 			.geniv = true,
1799 		}
1800 	},
1801 	{
1802 		.aead = {
1803 			.base = {
1804 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1805 				.cra_driver_name = "authenc-hmac-sha1-"
1806 						   "cbc-aes-caam-qi2",
1807 				.cra_blocksize = AES_BLOCK_SIZE,
1808 			},
1809 			.setkey = aead_setkey,
1810 			.setauthsize = aead_setauthsize,
1811 			.encrypt = aead_encrypt,
1812 			.decrypt = aead_decrypt,
1813 			.ivsize = AES_BLOCK_SIZE,
1814 			.maxauthsize = SHA1_DIGEST_SIZE,
1815 		},
1816 		.caam = {
1817 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1818 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1819 					   OP_ALG_AAI_HMAC_PRECOMP,
1820 		}
1821 	},
1822 	{
1823 		.aead = {
1824 			.base = {
1825 				.cra_name = "echainiv(authenc(hmac(sha1),"
1826 					    "cbc(aes)))",
1827 				.cra_driver_name = "echainiv-authenc-"
1828 						   "hmac-sha1-cbc-aes-caam-qi2",
1829 				.cra_blocksize = AES_BLOCK_SIZE,
1830 			},
1831 			.setkey = aead_setkey,
1832 			.setauthsize = aead_setauthsize,
1833 			.encrypt = aead_encrypt,
1834 			.decrypt = aead_decrypt,
1835 			.ivsize = AES_BLOCK_SIZE,
1836 			.maxauthsize = SHA1_DIGEST_SIZE,
1837 		},
1838 		.caam = {
1839 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1840 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1841 					   OP_ALG_AAI_HMAC_PRECOMP,
1842 			.geniv = true,
1843 		},
1844 	},
1845 	{
1846 		.aead = {
1847 			.base = {
1848 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1849 				.cra_driver_name = "authenc-hmac-sha224-"
1850 						   "cbc-aes-caam-qi2",
1851 				.cra_blocksize = AES_BLOCK_SIZE,
1852 			},
1853 			.setkey = aead_setkey,
1854 			.setauthsize = aead_setauthsize,
1855 			.encrypt = aead_encrypt,
1856 			.decrypt = aead_decrypt,
1857 			.ivsize = AES_BLOCK_SIZE,
1858 			.maxauthsize = SHA224_DIGEST_SIZE,
1859 		},
1860 		.caam = {
1861 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1862 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1863 					   OP_ALG_AAI_HMAC_PRECOMP,
1864 		}
1865 	},
1866 	{
1867 		.aead = {
1868 			.base = {
1869 				.cra_name = "echainiv(authenc(hmac(sha224),"
1870 					    "cbc(aes)))",
1871 				.cra_driver_name = "echainiv-authenc-"
1872 						   "hmac-sha224-cbc-aes-caam-qi2",
1873 				.cra_blocksize = AES_BLOCK_SIZE,
1874 			},
1875 			.setkey = aead_setkey,
1876 			.setauthsize = aead_setauthsize,
1877 			.encrypt = aead_encrypt,
1878 			.decrypt = aead_decrypt,
1879 			.ivsize = AES_BLOCK_SIZE,
1880 			.maxauthsize = SHA224_DIGEST_SIZE,
1881 		},
1882 		.caam = {
1883 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1884 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1885 					   OP_ALG_AAI_HMAC_PRECOMP,
1886 			.geniv = true,
1887 		}
1888 	},
1889 	{
1890 		.aead = {
1891 			.base = {
1892 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1893 				.cra_driver_name = "authenc-hmac-sha256-"
1894 						   "cbc-aes-caam-qi2",
1895 				.cra_blocksize = AES_BLOCK_SIZE,
1896 			},
1897 			.setkey = aead_setkey,
1898 			.setauthsize = aead_setauthsize,
1899 			.encrypt = aead_encrypt,
1900 			.decrypt = aead_decrypt,
1901 			.ivsize = AES_BLOCK_SIZE,
1902 			.maxauthsize = SHA256_DIGEST_SIZE,
1903 		},
1904 		.caam = {
1905 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1906 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1907 					   OP_ALG_AAI_HMAC_PRECOMP,
1908 		}
1909 	},
1910 	{
1911 		.aead = {
1912 			.base = {
1913 				.cra_name = "echainiv(authenc(hmac(sha256),"
1914 					    "cbc(aes)))",
1915 				.cra_driver_name = "echainiv-authenc-"
1916 						   "hmac-sha256-cbc-aes-"
1917 						   "caam-qi2",
1918 				.cra_blocksize = AES_BLOCK_SIZE,
1919 			},
1920 			.setkey = aead_setkey,
1921 			.setauthsize = aead_setauthsize,
1922 			.encrypt = aead_encrypt,
1923 			.decrypt = aead_decrypt,
1924 			.ivsize = AES_BLOCK_SIZE,
1925 			.maxauthsize = SHA256_DIGEST_SIZE,
1926 		},
1927 		.caam = {
1928 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1929 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1930 					   OP_ALG_AAI_HMAC_PRECOMP,
1931 			.geniv = true,
1932 		}
1933 	},
1934 	{
1935 		.aead = {
1936 			.base = {
1937 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1938 				.cra_driver_name = "authenc-hmac-sha384-"
1939 						   "cbc-aes-caam-qi2",
1940 				.cra_blocksize = AES_BLOCK_SIZE,
1941 			},
1942 			.setkey = aead_setkey,
1943 			.setauthsize = aead_setauthsize,
1944 			.encrypt = aead_encrypt,
1945 			.decrypt = aead_decrypt,
1946 			.ivsize = AES_BLOCK_SIZE,
1947 			.maxauthsize = SHA384_DIGEST_SIZE,
1948 		},
1949 		.caam = {
1950 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1951 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1952 					   OP_ALG_AAI_HMAC_PRECOMP,
1953 		}
1954 	},
1955 	{
1956 		.aead = {
1957 			.base = {
1958 				.cra_name = "echainiv(authenc(hmac(sha384),"
1959 					    "cbc(aes)))",
1960 				.cra_driver_name = "echainiv-authenc-"
1961 						   "hmac-sha384-cbc-aes-"
1962 						   "caam-qi2",
1963 				.cra_blocksize = AES_BLOCK_SIZE,
1964 			},
1965 			.setkey = aead_setkey,
1966 			.setauthsize = aead_setauthsize,
1967 			.encrypt = aead_encrypt,
1968 			.decrypt = aead_decrypt,
1969 			.ivsize = AES_BLOCK_SIZE,
1970 			.maxauthsize = SHA384_DIGEST_SIZE,
1971 		},
1972 		.caam = {
1973 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1974 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1975 					   OP_ALG_AAI_HMAC_PRECOMP,
1976 			.geniv = true,
1977 		}
1978 	},
1979 	{
1980 		.aead = {
1981 			.base = {
1982 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1983 				.cra_driver_name = "authenc-hmac-sha512-"
1984 						   "cbc-aes-caam-qi2",
1985 				.cra_blocksize = AES_BLOCK_SIZE,
1986 			},
1987 			.setkey = aead_setkey,
1988 			.setauthsize = aead_setauthsize,
1989 			.encrypt = aead_encrypt,
1990 			.decrypt = aead_decrypt,
1991 			.ivsize = AES_BLOCK_SIZE,
1992 			.maxauthsize = SHA512_DIGEST_SIZE,
1993 		},
1994 		.caam = {
1995 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1996 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1997 					   OP_ALG_AAI_HMAC_PRECOMP,
1998 		}
1999 	},
2000 	{
2001 		.aead = {
2002 			.base = {
2003 				.cra_name = "echainiv(authenc(hmac(sha512),"
2004 					    "cbc(aes)))",
2005 				.cra_driver_name = "echainiv-authenc-"
2006 						   "hmac-sha512-cbc-aes-"
2007 						   "caam-qi2",
2008 				.cra_blocksize = AES_BLOCK_SIZE,
2009 			},
2010 			.setkey = aead_setkey,
2011 			.setauthsize = aead_setauthsize,
2012 			.encrypt = aead_encrypt,
2013 			.decrypt = aead_decrypt,
2014 			.ivsize = AES_BLOCK_SIZE,
2015 			.maxauthsize = SHA512_DIGEST_SIZE,
2016 		},
2017 		.caam = {
2018 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2019 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2020 					   OP_ALG_AAI_HMAC_PRECOMP,
2021 			.geniv = true,
2022 		}
2023 	},
2024 	{
2025 		.aead = {
2026 			.base = {
2027 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2028 				.cra_driver_name = "authenc-hmac-md5-"
2029 						   "cbc-des3_ede-caam-qi2",
2030 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2031 			},
2032 			.setkey = des3_aead_setkey,
2033 			.setauthsize = aead_setauthsize,
2034 			.encrypt = aead_encrypt,
2035 			.decrypt = aead_decrypt,
2036 			.ivsize = DES3_EDE_BLOCK_SIZE,
2037 			.maxauthsize = MD5_DIGEST_SIZE,
2038 		},
2039 		.caam = {
2040 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2041 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2042 					   OP_ALG_AAI_HMAC_PRECOMP,
2043 		}
2044 	},
2045 	{
2046 		.aead = {
2047 			.base = {
2048 				.cra_name = "echainiv(authenc(hmac(md5),"
2049 					    "cbc(des3_ede)))",
2050 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2051 						   "cbc-des3_ede-caam-qi2",
2052 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2053 			},
2054 			.setkey = des3_aead_setkey,
2055 			.setauthsize = aead_setauthsize,
2056 			.encrypt = aead_encrypt,
2057 			.decrypt = aead_decrypt,
2058 			.ivsize = DES3_EDE_BLOCK_SIZE,
2059 			.maxauthsize = MD5_DIGEST_SIZE,
2060 		},
2061 		.caam = {
2062 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2063 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2064 					   OP_ALG_AAI_HMAC_PRECOMP,
2065 			.geniv = true,
2066 		}
2067 	},
2068 	{
2069 		.aead = {
2070 			.base = {
2071 				.cra_name = "authenc(hmac(sha1),"
2072 					    "cbc(des3_ede))",
2073 				.cra_driver_name = "authenc-hmac-sha1-"
2074 						   "cbc-des3_ede-caam-qi2",
2075 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2076 			},
2077 			.setkey = des3_aead_setkey,
2078 			.setauthsize = aead_setauthsize,
2079 			.encrypt = aead_encrypt,
2080 			.decrypt = aead_decrypt,
2081 			.ivsize = DES3_EDE_BLOCK_SIZE,
2082 			.maxauthsize = SHA1_DIGEST_SIZE,
2083 		},
2084 		.caam = {
2085 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2086 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2087 					   OP_ALG_AAI_HMAC_PRECOMP,
2088 		},
2089 	},
2090 	{
2091 		.aead = {
2092 			.base = {
2093 				.cra_name = "echainiv(authenc(hmac(sha1),"
2094 					    "cbc(des3_ede)))",
2095 				.cra_driver_name = "echainiv-authenc-"
2096 						   "hmac-sha1-"
2097 						   "cbc-des3_ede-caam-qi2",
2098 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2099 			},
2100 			.setkey = des3_aead_setkey,
2101 			.setauthsize = aead_setauthsize,
2102 			.encrypt = aead_encrypt,
2103 			.decrypt = aead_decrypt,
2104 			.ivsize = DES3_EDE_BLOCK_SIZE,
2105 			.maxauthsize = SHA1_DIGEST_SIZE,
2106 		},
2107 		.caam = {
2108 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2109 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2110 					   OP_ALG_AAI_HMAC_PRECOMP,
2111 			.geniv = true,
2112 		}
2113 	},
2114 	{
2115 		.aead = {
2116 			.base = {
2117 				.cra_name = "authenc(hmac(sha224),"
2118 					    "cbc(des3_ede))",
2119 				.cra_driver_name = "authenc-hmac-sha224-"
2120 						   "cbc-des3_ede-caam-qi2",
2121 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2122 			},
2123 			.setkey = des3_aead_setkey,
2124 			.setauthsize = aead_setauthsize,
2125 			.encrypt = aead_encrypt,
2126 			.decrypt = aead_decrypt,
2127 			.ivsize = DES3_EDE_BLOCK_SIZE,
2128 			.maxauthsize = SHA224_DIGEST_SIZE,
2129 		},
2130 		.caam = {
2131 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2132 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2133 					   OP_ALG_AAI_HMAC_PRECOMP,
2134 		},
2135 	},
2136 	{
2137 		.aead = {
2138 			.base = {
2139 				.cra_name = "echainiv(authenc(hmac(sha224),"
2140 					    "cbc(des3_ede)))",
2141 				.cra_driver_name = "echainiv-authenc-"
2142 						   "hmac-sha224-"
2143 						   "cbc-des3_ede-caam-qi2",
2144 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2145 			},
2146 			.setkey = des3_aead_setkey,
2147 			.setauthsize = aead_setauthsize,
2148 			.encrypt = aead_encrypt,
2149 			.decrypt = aead_decrypt,
2150 			.ivsize = DES3_EDE_BLOCK_SIZE,
2151 			.maxauthsize = SHA224_DIGEST_SIZE,
2152 		},
2153 		.caam = {
2154 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2155 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2156 					   OP_ALG_AAI_HMAC_PRECOMP,
2157 			.geniv = true,
2158 		}
2159 	},
2160 	{
2161 		.aead = {
2162 			.base = {
2163 				.cra_name = "authenc(hmac(sha256),"
2164 					    "cbc(des3_ede))",
2165 				.cra_driver_name = "authenc-hmac-sha256-"
2166 						   "cbc-des3_ede-caam-qi2",
2167 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2168 			},
2169 			.setkey = des3_aead_setkey,
2170 			.setauthsize = aead_setauthsize,
2171 			.encrypt = aead_encrypt,
2172 			.decrypt = aead_decrypt,
2173 			.ivsize = DES3_EDE_BLOCK_SIZE,
2174 			.maxauthsize = SHA256_DIGEST_SIZE,
2175 		},
2176 		.caam = {
2177 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2178 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2179 					   OP_ALG_AAI_HMAC_PRECOMP,
2180 		},
2181 	},
2182 	{
2183 		.aead = {
2184 			.base = {
2185 				.cra_name = "echainiv(authenc(hmac(sha256),"
2186 					    "cbc(des3_ede)))",
2187 				.cra_driver_name = "echainiv-authenc-"
2188 						   "hmac-sha256-"
2189 						   "cbc-des3_ede-caam-qi2",
2190 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2191 			},
2192 			.setkey = des3_aead_setkey,
2193 			.setauthsize = aead_setauthsize,
2194 			.encrypt = aead_encrypt,
2195 			.decrypt = aead_decrypt,
2196 			.ivsize = DES3_EDE_BLOCK_SIZE,
2197 			.maxauthsize = SHA256_DIGEST_SIZE,
2198 		},
2199 		.caam = {
2200 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2201 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2202 					   OP_ALG_AAI_HMAC_PRECOMP,
2203 			.geniv = true,
2204 		}
2205 	},
2206 	{
2207 		.aead = {
2208 			.base = {
2209 				.cra_name = "authenc(hmac(sha384),"
2210 					    "cbc(des3_ede))",
2211 				.cra_driver_name = "authenc-hmac-sha384-"
2212 						   "cbc-des3_ede-caam-qi2",
2213 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2214 			},
2215 			.setkey = des3_aead_setkey,
2216 			.setauthsize = aead_setauthsize,
2217 			.encrypt = aead_encrypt,
2218 			.decrypt = aead_decrypt,
2219 			.ivsize = DES3_EDE_BLOCK_SIZE,
2220 			.maxauthsize = SHA384_DIGEST_SIZE,
2221 		},
2222 		.caam = {
2223 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2224 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2225 					   OP_ALG_AAI_HMAC_PRECOMP,
2226 		},
2227 	},
2228 	{
2229 		.aead = {
2230 			.base = {
2231 				.cra_name = "echainiv(authenc(hmac(sha384),"
2232 					    "cbc(des3_ede)))",
2233 				.cra_driver_name = "echainiv-authenc-"
2234 						   "hmac-sha384-"
2235 						   "cbc-des3_ede-caam-qi2",
2236 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2237 			},
2238 			.setkey = des3_aead_setkey,
2239 			.setauthsize = aead_setauthsize,
2240 			.encrypt = aead_encrypt,
2241 			.decrypt = aead_decrypt,
2242 			.ivsize = DES3_EDE_BLOCK_SIZE,
2243 			.maxauthsize = SHA384_DIGEST_SIZE,
2244 		},
2245 		.caam = {
2246 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2247 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2248 					   OP_ALG_AAI_HMAC_PRECOMP,
2249 			.geniv = true,
2250 		}
2251 	},
2252 	{
2253 		.aead = {
2254 			.base = {
2255 				.cra_name = "authenc(hmac(sha512),"
2256 					    "cbc(des3_ede))",
2257 				.cra_driver_name = "authenc-hmac-sha512-"
2258 						   "cbc-des3_ede-caam-qi2",
2259 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2260 			},
2261 			.setkey = des3_aead_setkey,
2262 			.setauthsize = aead_setauthsize,
2263 			.encrypt = aead_encrypt,
2264 			.decrypt = aead_decrypt,
2265 			.ivsize = DES3_EDE_BLOCK_SIZE,
2266 			.maxauthsize = SHA512_DIGEST_SIZE,
2267 		},
2268 		.caam = {
2269 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2270 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2271 					   OP_ALG_AAI_HMAC_PRECOMP,
2272 		},
2273 	},
2274 	{
2275 		.aead = {
2276 			.base = {
2277 				.cra_name = "echainiv(authenc(hmac(sha512),"
2278 					    "cbc(des3_ede)))",
2279 				.cra_driver_name = "echainiv-authenc-"
2280 						   "hmac-sha512-"
2281 						   "cbc-des3_ede-caam-qi2",
2282 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2283 			},
2284 			.setkey = des3_aead_setkey,
2285 			.setauthsize = aead_setauthsize,
2286 			.encrypt = aead_encrypt,
2287 			.decrypt = aead_decrypt,
2288 			.ivsize = DES3_EDE_BLOCK_SIZE,
2289 			.maxauthsize = SHA512_DIGEST_SIZE,
2290 		},
2291 		.caam = {
2292 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2293 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2294 					   OP_ALG_AAI_HMAC_PRECOMP,
2295 			.geniv = true,
2296 		}
2297 	},
2298 	{
2299 		.aead = {
2300 			.base = {
2301 				.cra_name = "authenc(hmac(md5),cbc(des))",
2302 				.cra_driver_name = "authenc-hmac-md5-"
2303 						   "cbc-des-caam-qi2",
2304 				.cra_blocksize = DES_BLOCK_SIZE,
2305 			},
2306 			.setkey = aead_setkey,
2307 			.setauthsize = aead_setauthsize,
2308 			.encrypt = aead_encrypt,
2309 			.decrypt = aead_decrypt,
2310 			.ivsize = DES_BLOCK_SIZE,
2311 			.maxauthsize = MD5_DIGEST_SIZE,
2312 		},
2313 		.caam = {
2314 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2315 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2316 					   OP_ALG_AAI_HMAC_PRECOMP,
2317 		},
2318 	},
2319 	{
2320 		.aead = {
2321 			.base = {
2322 				.cra_name = "echainiv(authenc(hmac(md5),"
2323 					    "cbc(des)))",
2324 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2325 						   "cbc-des-caam-qi2",
2326 				.cra_blocksize = DES_BLOCK_SIZE,
2327 			},
2328 			.setkey = aead_setkey,
2329 			.setauthsize = aead_setauthsize,
2330 			.encrypt = aead_encrypt,
2331 			.decrypt = aead_decrypt,
2332 			.ivsize = DES_BLOCK_SIZE,
2333 			.maxauthsize = MD5_DIGEST_SIZE,
2334 		},
2335 		.caam = {
2336 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2337 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2338 					   OP_ALG_AAI_HMAC_PRECOMP,
2339 			.geniv = true,
2340 		}
2341 	},
2342 	{
2343 		.aead = {
2344 			.base = {
2345 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2346 				.cra_driver_name = "authenc-hmac-sha1-"
2347 						   "cbc-des-caam-qi2",
2348 				.cra_blocksize = DES_BLOCK_SIZE,
2349 			},
2350 			.setkey = aead_setkey,
2351 			.setauthsize = aead_setauthsize,
2352 			.encrypt = aead_encrypt,
2353 			.decrypt = aead_decrypt,
2354 			.ivsize = DES_BLOCK_SIZE,
2355 			.maxauthsize = SHA1_DIGEST_SIZE,
2356 		},
2357 		.caam = {
2358 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2359 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2360 					   OP_ALG_AAI_HMAC_PRECOMP,
2361 		},
2362 	},
2363 	{
2364 		.aead = {
2365 			.base = {
2366 				.cra_name = "echainiv(authenc(hmac(sha1),"
2367 					    "cbc(des)))",
2368 				.cra_driver_name = "echainiv-authenc-"
2369 						   "hmac-sha1-cbc-des-caam-qi2",
2370 				.cra_blocksize = DES_BLOCK_SIZE,
2371 			},
2372 			.setkey = aead_setkey,
2373 			.setauthsize = aead_setauthsize,
2374 			.encrypt = aead_encrypt,
2375 			.decrypt = aead_decrypt,
2376 			.ivsize = DES_BLOCK_SIZE,
2377 			.maxauthsize = SHA1_DIGEST_SIZE,
2378 		},
2379 		.caam = {
2380 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2381 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2382 					   OP_ALG_AAI_HMAC_PRECOMP,
2383 			.geniv = true,
2384 		}
2385 	},
2386 	{
2387 		.aead = {
2388 			.base = {
2389 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2390 				.cra_driver_name = "authenc-hmac-sha224-"
2391 						   "cbc-des-caam-qi2",
2392 				.cra_blocksize = DES_BLOCK_SIZE,
2393 			},
2394 			.setkey = aead_setkey,
2395 			.setauthsize = aead_setauthsize,
2396 			.encrypt = aead_encrypt,
2397 			.decrypt = aead_decrypt,
2398 			.ivsize = DES_BLOCK_SIZE,
2399 			.maxauthsize = SHA224_DIGEST_SIZE,
2400 		},
2401 		.caam = {
2402 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2403 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2404 					   OP_ALG_AAI_HMAC_PRECOMP,
2405 		},
2406 	},
2407 	{
2408 		.aead = {
2409 			.base = {
2410 				.cra_name = "echainiv(authenc(hmac(sha224),"
2411 					    "cbc(des)))",
2412 				.cra_driver_name = "echainiv-authenc-"
2413 						   "hmac-sha224-cbc-des-"
2414 						   "caam-qi2",
2415 				.cra_blocksize = DES_BLOCK_SIZE,
2416 			},
2417 			.setkey = aead_setkey,
2418 			.setauthsize = aead_setauthsize,
2419 			.encrypt = aead_encrypt,
2420 			.decrypt = aead_decrypt,
2421 			.ivsize = DES_BLOCK_SIZE,
2422 			.maxauthsize = SHA224_DIGEST_SIZE,
2423 		},
2424 		.caam = {
2425 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2426 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2427 					   OP_ALG_AAI_HMAC_PRECOMP,
2428 			.geniv = true,
2429 		}
2430 	},
2431 	{
2432 		.aead = {
2433 			.base = {
2434 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2435 				.cra_driver_name = "authenc-hmac-sha256-"
2436 						   "cbc-des-caam-qi2",
2437 				.cra_blocksize = DES_BLOCK_SIZE,
2438 			},
2439 			.setkey = aead_setkey,
2440 			.setauthsize = aead_setauthsize,
2441 			.encrypt = aead_encrypt,
2442 			.decrypt = aead_decrypt,
2443 			.ivsize = DES_BLOCK_SIZE,
2444 			.maxauthsize = SHA256_DIGEST_SIZE,
2445 		},
2446 		.caam = {
2447 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2448 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2449 					   OP_ALG_AAI_HMAC_PRECOMP,
2450 		},
2451 	},
2452 	{
2453 		.aead = {
2454 			.base = {
2455 				.cra_name = "echainiv(authenc(hmac(sha256),"
2456 					    "cbc(des)))",
2457 				.cra_driver_name = "echainiv-authenc-"
2458 						   "hmac-sha256-cbc-des-"
2459 						   "caam-qi2",
2460 				.cra_blocksize = DES_BLOCK_SIZE,
2461 			},
2462 			.setkey = aead_setkey,
2463 			.setauthsize = aead_setauthsize,
2464 			.encrypt = aead_encrypt,
2465 			.decrypt = aead_decrypt,
2466 			.ivsize = DES_BLOCK_SIZE,
2467 			.maxauthsize = SHA256_DIGEST_SIZE,
2468 		},
2469 		.caam = {
2470 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2471 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2472 					   OP_ALG_AAI_HMAC_PRECOMP,
2473 			.geniv = true,
2474 		},
2475 	},
2476 	{
2477 		.aead = {
2478 			.base = {
2479 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2480 				.cra_driver_name = "authenc-hmac-sha384-"
2481 						   "cbc-des-caam-qi2",
2482 				.cra_blocksize = DES_BLOCK_SIZE,
2483 			},
2484 			.setkey = aead_setkey,
2485 			.setauthsize = aead_setauthsize,
2486 			.encrypt = aead_encrypt,
2487 			.decrypt = aead_decrypt,
2488 			.ivsize = DES_BLOCK_SIZE,
2489 			.maxauthsize = SHA384_DIGEST_SIZE,
2490 		},
2491 		.caam = {
2492 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2493 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2494 					   OP_ALG_AAI_HMAC_PRECOMP,
2495 		},
2496 	},
2497 	{
2498 		.aead = {
2499 			.base = {
2500 				.cra_name = "echainiv(authenc(hmac(sha384),"
2501 					    "cbc(des)))",
2502 				.cra_driver_name = "echainiv-authenc-"
2503 						   "hmac-sha384-cbc-des-"
2504 						   "caam-qi2",
2505 				.cra_blocksize = DES_BLOCK_SIZE,
2506 			},
2507 			.setkey = aead_setkey,
2508 			.setauthsize = aead_setauthsize,
2509 			.encrypt = aead_encrypt,
2510 			.decrypt = aead_decrypt,
2511 			.ivsize = DES_BLOCK_SIZE,
2512 			.maxauthsize = SHA384_DIGEST_SIZE,
2513 		},
2514 		.caam = {
2515 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2516 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2517 					   OP_ALG_AAI_HMAC_PRECOMP,
2518 			.geniv = true,
2519 		}
2520 	},
2521 	{
2522 		.aead = {
2523 			.base = {
2524 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2525 				.cra_driver_name = "authenc-hmac-sha512-"
2526 						   "cbc-des-caam-qi2",
2527 				.cra_blocksize = DES_BLOCK_SIZE,
2528 			},
2529 			.setkey = aead_setkey,
2530 			.setauthsize = aead_setauthsize,
2531 			.encrypt = aead_encrypt,
2532 			.decrypt = aead_decrypt,
2533 			.ivsize = DES_BLOCK_SIZE,
2534 			.maxauthsize = SHA512_DIGEST_SIZE,
2535 		},
2536 		.caam = {
2537 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2538 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2539 					   OP_ALG_AAI_HMAC_PRECOMP,
2540 		}
2541 	},
2542 	{
2543 		.aead = {
2544 			.base = {
2545 				.cra_name = "echainiv(authenc(hmac(sha512),"
2546 					    "cbc(des)))",
2547 				.cra_driver_name = "echainiv-authenc-"
2548 						   "hmac-sha512-cbc-des-"
2549 						   "caam-qi2",
2550 				.cra_blocksize = DES_BLOCK_SIZE,
2551 			},
2552 			.setkey = aead_setkey,
2553 			.setauthsize = aead_setauthsize,
2554 			.encrypt = aead_encrypt,
2555 			.decrypt = aead_decrypt,
2556 			.ivsize = DES_BLOCK_SIZE,
2557 			.maxauthsize = SHA512_DIGEST_SIZE,
2558 		},
2559 		.caam = {
2560 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2561 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2562 					   OP_ALG_AAI_HMAC_PRECOMP,
2563 			.geniv = true,
2564 		}
2565 	},
2566 	{
2567 		.aead = {
2568 			.base = {
2569 				.cra_name = "authenc(hmac(md5),"
2570 					    "rfc3686(ctr(aes)))",
2571 				.cra_driver_name = "authenc-hmac-md5-"
2572 						   "rfc3686-ctr-aes-caam-qi2",
2573 				.cra_blocksize = 1,
2574 			},
2575 			.setkey = aead_setkey,
2576 			.setauthsize = aead_setauthsize,
2577 			.encrypt = aead_encrypt,
2578 			.decrypt = aead_decrypt,
2579 			.ivsize = CTR_RFC3686_IV_SIZE,
2580 			.maxauthsize = MD5_DIGEST_SIZE,
2581 		},
2582 		.caam = {
2583 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2584 					   OP_ALG_AAI_CTR_MOD128,
2585 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2586 					   OP_ALG_AAI_HMAC_PRECOMP,
2587 			.rfc3686 = true,
2588 		},
2589 	},
2590 	{
2591 		.aead = {
2592 			.base = {
2593 				.cra_name = "seqiv(authenc("
2594 					    "hmac(md5),rfc3686(ctr(aes))))",
2595 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2596 						   "rfc3686-ctr-aes-caam-qi2",
2597 				.cra_blocksize = 1,
2598 			},
2599 			.setkey = aead_setkey,
2600 			.setauthsize = aead_setauthsize,
2601 			.encrypt = aead_encrypt,
2602 			.decrypt = aead_decrypt,
2603 			.ivsize = CTR_RFC3686_IV_SIZE,
2604 			.maxauthsize = MD5_DIGEST_SIZE,
2605 		},
2606 		.caam = {
2607 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2608 					   OP_ALG_AAI_CTR_MOD128,
2609 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2610 					   OP_ALG_AAI_HMAC_PRECOMP,
2611 			.rfc3686 = true,
2612 			.geniv = true,
2613 		},
2614 	},
2615 	{
2616 		.aead = {
2617 			.base = {
2618 				.cra_name = "authenc(hmac(sha1),"
2619 					    "rfc3686(ctr(aes)))",
2620 				.cra_driver_name = "authenc-hmac-sha1-"
2621 						   "rfc3686-ctr-aes-caam-qi2",
2622 				.cra_blocksize = 1,
2623 			},
2624 			.setkey = aead_setkey,
2625 			.setauthsize = aead_setauthsize,
2626 			.encrypt = aead_encrypt,
2627 			.decrypt = aead_decrypt,
2628 			.ivsize = CTR_RFC3686_IV_SIZE,
2629 			.maxauthsize = SHA1_DIGEST_SIZE,
2630 		},
2631 		.caam = {
2632 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2633 					   OP_ALG_AAI_CTR_MOD128,
2634 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2635 					   OP_ALG_AAI_HMAC_PRECOMP,
2636 			.rfc3686 = true,
2637 		},
2638 	},
2639 	{
2640 		.aead = {
2641 			.base = {
2642 				.cra_name = "seqiv(authenc("
2643 					    "hmac(sha1),rfc3686(ctr(aes))))",
2644 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2645 						   "rfc3686-ctr-aes-caam-qi2",
2646 				.cra_blocksize = 1,
2647 			},
2648 			.setkey = aead_setkey,
2649 			.setauthsize = aead_setauthsize,
2650 			.encrypt = aead_encrypt,
2651 			.decrypt = aead_decrypt,
2652 			.ivsize = CTR_RFC3686_IV_SIZE,
2653 			.maxauthsize = SHA1_DIGEST_SIZE,
2654 		},
2655 		.caam = {
2656 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2657 					   OP_ALG_AAI_CTR_MOD128,
2658 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2659 					   OP_ALG_AAI_HMAC_PRECOMP,
2660 			.rfc3686 = true,
2661 			.geniv = true,
2662 		},
2663 	},
2664 	{
2665 		.aead = {
2666 			.base = {
2667 				.cra_name = "authenc(hmac(sha224),"
2668 					    "rfc3686(ctr(aes)))",
2669 				.cra_driver_name = "authenc-hmac-sha224-"
2670 						   "rfc3686-ctr-aes-caam-qi2",
2671 				.cra_blocksize = 1,
2672 			},
2673 			.setkey = aead_setkey,
2674 			.setauthsize = aead_setauthsize,
2675 			.encrypt = aead_encrypt,
2676 			.decrypt = aead_decrypt,
2677 			.ivsize = CTR_RFC3686_IV_SIZE,
2678 			.maxauthsize = SHA224_DIGEST_SIZE,
2679 		},
2680 		.caam = {
2681 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2682 					   OP_ALG_AAI_CTR_MOD128,
2683 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2684 					   OP_ALG_AAI_HMAC_PRECOMP,
2685 			.rfc3686 = true,
2686 		},
2687 	},
2688 	{
2689 		.aead = {
2690 			.base = {
2691 				.cra_name = "seqiv(authenc("
2692 					    "hmac(sha224),rfc3686(ctr(aes))))",
2693 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2694 						   "rfc3686-ctr-aes-caam-qi2",
2695 				.cra_blocksize = 1,
2696 			},
2697 			.setkey = aead_setkey,
2698 			.setauthsize = aead_setauthsize,
2699 			.encrypt = aead_encrypt,
2700 			.decrypt = aead_decrypt,
2701 			.ivsize = CTR_RFC3686_IV_SIZE,
2702 			.maxauthsize = SHA224_DIGEST_SIZE,
2703 		},
2704 		.caam = {
2705 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2706 					   OP_ALG_AAI_CTR_MOD128,
2707 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2708 					   OP_ALG_AAI_HMAC_PRECOMP,
2709 			.rfc3686 = true,
2710 			.geniv = true,
2711 		},
2712 	},
2713 	{
2714 		.aead = {
2715 			.base = {
2716 				.cra_name = "authenc(hmac(sha256),"
2717 					    "rfc3686(ctr(aes)))",
2718 				.cra_driver_name = "authenc-hmac-sha256-"
2719 						   "rfc3686-ctr-aes-caam-qi2",
2720 				.cra_blocksize = 1,
2721 			},
2722 			.setkey = aead_setkey,
2723 			.setauthsize = aead_setauthsize,
2724 			.encrypt = aead_encrypt,
2725 			.decrypt = aead_decrypt,
2726 			.ivsize = CTR_RFC3686_IV_SIZE,
2727 			.maxauthsize = SHA256_DIGEST_SIZE,
2728 		},
2729 		.caam = {
2730 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2731 					   OP_ALG_AAI_CTR_MOD128,
2732 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2733 					   OP_ALG_AAI_HMAC_PRECOMP,
2734 			.rfc3686 = true,
2735 		},
2736 	},
2737 	{
2738 		.aead = {
2739 			.base = {
2740 				.cra_name = "seqiv(authenc(hmac(sha256),"
2741 					    "rfc3686(ctr(aes))))",
2742 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2743 						   "rfc3686-ctr-aes-caam-qi2",
2744 				.cra_blocksize = 1,
2745 			},
2746 			.setkey = aead_setkey,
2747 			.setauthsize = aead_setauthsize,
2748 			.encrypt = aead_encrypt,
2749 			.decrypt = aead_decrypt,
2750 			.ivsize = CTR_RFC3686_IV_SIZE,
2751 			.maxauthsize = SHA256_DIGEST_SIZE,
2752 		},
2753 		.caam = {
2754 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2755 					   OP_ALG_AAI_CTR_MOD128,
2756 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2757 					   OP_ALG_AAI_HMAC_PRECOMP,
2758 			.rfc3686 = true,
2759 			.geniv = true,
2760 		},
2761 	},
2762 	{
2763 		.aead = {
2764 			.base = {
2765 				.cra_name = "authenc(hmac(sha384),"
2766 					    "rfc3686(ctr(aes)))",
2767 				.cra_driver_name = "authenc-hmac-sha384-"
2768 						   "rfc3686-ctr-aes-caam-qi2",
2769 				.cra_blocksize = 1,
2770 			},
2771 			.setkey = aead_setkey,
2772 			.setauthsize = aead_setauthsize,
2773 			.encrypt = aead_encrypt,
2774 			.decrypt = aead_decrypt,
2775 			.ivsize = CTR_RFC3686_IV_SIZE,
2776 			.maxauthsize = SHA384_DIGEST_SIZE,
2777 		},
2778 		.caam = {
2779 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2780 					   OP_ALG_AAI_CTR_MOD128,
2781 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2782 					   OP_ALG_AAI_HMAC_PRECOMP,
2783 			.rfc3686 = true,
2784 		},
2785 	},
2786 	{
2787 		.aead = {
2788 			.base = {
2789 				.cra_name = "seqiv(authenc(hmac(sha384),"
2790 					    "rfc3686(ctr(aes))))",
2791 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2792 						   "rfc3686-ctr-aes-caam-qi2",
2793 				.cra_blocksize = 1,
2794 			},
2795 			.setkey = aead_setkey,
2796 			.setauthsize = aead_setauthsize,
2797 			.encrypt = aead_encrypt,
2798 			.decrypt = aead_decrypt,
2799 			.ivsize = CTR_RFC3686_IV_SIZE,
2800 			.maxauthsize = SHA384_DIGEST_SIZE,
2801 		},
2802 		.caam = {
2803 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2804 					   OP_ALG_AAI_CTR_MOD128,
2805 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2806 					   OP_ALG_AAI_HMAC_PRECOMP,
2807 			.rfc3686 = true,
2808 			.geniv = true,
2809 		},
2810 	},
2811 	{
2812 		.aead = {
2813 			.base = {
2814 				.cra_name = "rfc7539(chacha20,poly1305)",
2815 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2816 						   "caam-qi2",
2817 				.cra_blocksize = 1,
2818 			},
2819 			.setkey = chachapoly_setkey,
2820 			.setauthsize = chachapoly_setauthsize,
2821 			.encrypt = aead_encrypt,
2822 			.decrypt = aead_decrypt,
2823 			.ivsize = CHACHAPOLY_IV_SIZE,
2824 			.maxauthsize = POLY1305_DIGEST_SIZE,
2825 		},
2826 		.caam = {
2827 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2828 					   OP_ALG_AAI_AEAD,
2829 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2830 					   OP_ALG_AAI_AEAD,
2831 			.nodkp = true,
2832 		},
2833 	},
2834 	{
2835 		.aead = {
2836 			.base = {
2837 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2838 				.cra_driver_name = "rfc7539esp-chacha20-"
2839 						   "poly1305-caam-qi2",
2840 				.cra_blocksize = 1,
2841 			},
2842 			.setkey = chachapoly_setkey,
2843 			.setauthsize = chachapoly_setauthsize,
2844 			.encrypt = aead_encrypt,
2845 			.decrypt = aead_decrypt,
2846 			.ivsize = 8,
2847 			.maxauthsize = POLY1305_DIGEST_SIZE,
2848 		},
2849 		.caam = {
2850 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2851 					   OP_ALG_AAI_AEAD,
2852 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2853 					   OP_ALG_AAI_AEAD,
2854 			.nodkp = true,
2855 		},
2856 	},
2857 	{
2858 		.aead = {
2859 			.base = {
2860 				.cra_name = "authenc(hmac(sha512),"
2861 					    "rfc3686(ctr(aes)))",
2862 				.cra_driver_name = "authenc-hmac-sha512-"
2863 						   "rfc3686-ctr-aes-caam-qi2",
2864 				.cra_blocksize = 1,
2865 			},
2866 			.setkey = aead_setkey,
2867 			.setauthsize = aead_setauthsize,
2868 			.encrypt = aead_encrypt,
2869 			.decrypt = aead_decrypt,
2870 			.ivsize = CTR_RFC3686_IV_SIZE,
2871 			.maxauthsize = SHA512_DIGEST_SIZE,
2872 		},
2873 		.caam = {
2874 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2875 					   OP_ALG_AAI_CTR_MOD128,
2876 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2877 					   OP_ALG_AAI_HMAC_PRECOMP,
2878 			.rfc3686 = true,
2879 		},
2880 	},
2881 	{
2882 		.aead = {
2883 			.base = {
2884 				.cra_name = "seqiv(authenc(hmac(sha512),"
2885 					    "rfc3686(ctr(aes))))",
2886 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2887 						   "rfc3686-ctr-aes-caam-qi2",
2888 				.cra_blocksize = 1,
2889 			},
2890 			.setkey = aead_setkey,
2891 			.setauthsize = aead_setauthsize,
2892 			.encrypt = aead_encrypt,
2893 			.decrypt = aead_decrypt,
2894 			.ivsize = CTR_RFC3686_IV_SIZE,
2895 			.maxauthsize = SHA512_DIGEST_SIZE,
2896 		},
2897 		.caam = {
2898 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2899 					   OP_ALG_AAI_CTR_MOD128,
2900 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2901 					   OP_ALG_AAI_HMAC_PRECOMP,
2902 			.rfc3686 = true,
2903 			.geniv = true,
2904 		},
2905 	},
2906 };
2907 
2908 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2909 {
2910 	struct skcipher_alg *alg = &t_alg->skcipher;
2911 
2912 	alg->base.cra_module = THIS_MODULE;
2913 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2914 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2915 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
2916 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
2917 
2918 	alg->init = caam_cra_init_skcipher;
2919 	alg->exit = caam_cra_exit;
2920 }
2921 
2922 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2923 {
2924 	struct aead_alg *alg = &t_alg->aead;
2925 
2926 	alg->base.cra_module = THIS_MODULE;
2927 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2928 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2929 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
2930 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
2931 
2932 	alg->init = caam_cra_init_aead;
2933 	alg->exit = caam_cra_exit_aead;
2934 }
2935 
2936 /* max hash key is max split key size */
2937 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2938 
2939 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2940 
2941 /* caam context sizes for hashes: running digest + 8 */
2942 #define HASH_MSG_LEN			8
2943 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2944 
2945 enum hash_optype {
2946 	UPDATE = 0,
2947 	UPDATE_FIRST,
2948 	FINALIZE,
2949 	DIGEST,
2950 	HASH_NUM_OP
2951 };
2952 
2953 /**
2954  * caam_hash_ctx - ahash per-session context
2955  * @flc: Flow Contexts array
2956  * @key: authentication key
2957  * @flc_dma: I/O virtual addresses of the Flow Contexts
2958  * @dev: dpseci device
2959  * @ctx_len: size of Context Register
2960  * @adata: hashing algorithm details
2961  */
2962 struct caam_hash_ctx {
2963 	struct caam_flc flc[HASH_NUM_OP];
2964 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2965 	dma_addr_t flc_dma[HASH_NUM_OP];
2966 	struct device *dev;
2967 	int ctx_len;
2968 	struct alginfo adata;
2969 };
2970 
2971 /* ahash state */
2972 struct caam_hash_state {
2973 	struct caam_request caam_req;
2974 	dma_addr_t buf_dma;
2975 	dma_addr_t ctx_dma;
2976 	int ctx_dma_len;
2977 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2978 	int buflen;
2979 	int next_buflen;
2980 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2981 	int (*update)(struct ahash_request *req);
2982 	int (*final)(struct ahash_request *req);
2983 	int (*finup)(struct ahash_request *req);
2984 };
2985 
2986 struct caam_export_state {
2987 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2988 	u8 caam_ctx[MAX_CTX_LEN];
2989 	int buflen;
2990 	int (*update)(struct ahash_request *req);
2991 	int (*final)(struct ahash_request *req);
2992 	int (*finup)(struct ahash_request *req);
2993 };
2994 
2995 /* Map current buffer in state (if length > 0) and put it in link table */
2996 static inline int buf_map_to_qm_sg(struct device *dev,
2997 				   struct dpaa2_sg_entry *qm_sg,
2998 				   struct caam_hash_state *state)
2999 {
3000 	int buflen = state->buflen;
3001 
3002 	if (!buflen)
3003 		return 0;
3004 
3005 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3006 					DMA_TO_DEVICE);
3007 	if (dma_mapping_error(dev, state->buf_dma)) {
3008 		dev_err(dev, "unable to map buf\n");
3009 		state->buf_dma = 0;
3010 		return -ENOMEM;
3011 	}
3012 
3013 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3014 
3015 	return 0;
3016 }
3017 
3018 /* Map state->caam_ctx, and add it to link table */
3019 static inline int ctx_map_to_qm_sg(struct device *dev,
3020 				   struct caam_hash_state *state, int ctx_len,
3021 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3022 {
3023 	state->ctx_dma_len = ctx_len;
3024 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3025 	if (dma_mapping_error(dev, state->ctx_dma)) {
3026 		dev_err(dev, "unable to map ctx\n");
3027 		state->ctx_dma = 0;
3028 		return -ENOMEM;
3029 	}
3030 
3031 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3032 
3033 	return 0;
3034 }
3035 
3036 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3037 {
3038 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3039 	int digestsize = crypto_ahash_digestsize(ahash);
3040 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3041 	struct caam_flc *flc;
3042 	u32 *desc;
3043 
3044 	/* ahash_update shared descriptor */
3045 	flc = &ctx->flc[UPDATE];
3046 	desc = flc->sh_desc;
3047 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3048 			  ctx->ctx_len, true, priv->sec_attr.era);
3049 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3050 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3051 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3052 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3053 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3054 			     1);
3055 
3056 	/* ahash_update_first shared descriptor */
3057 	flc = &ctx->flc[UPDATE_FIRST];
3058 	desc = flc->sh_desc;
3059 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3060 			  ctx->ctx_len, false, priv->sec_attr.era);
3061 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3062 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3063 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3064 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3065 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3066 			     1);
3067 
3068 	/* ahash_final shared descriptor */
3069 	flc = &ctx->flc[FINALIZE];
3070 	desc = flc->sh_desc;
3071 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3072 			  ctx->ctx_len, true, priv->sec_attr.era);
3073 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3074 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3075 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3076 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3077 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3078 			     1);
3079 
3080 	/* ahash_digest shared descriptor */
3081 	flc = &ctx->flc[DIGEST];
3082 	desc = flc->sh_desc;
3083 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3084 			  ctx->ctx_len, false, priv->sec_attr.era);
3085 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3086 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3087 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3088 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3089 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3090 			     1);
3091 
3092 	return 0;
3093 }
3094 
3095 struct split_key_sh_result {
3096 	struct completion completion;
3097 	int err;
3098 	struct device *dev;
3099 };
3100 
3101 static void split_key_sh_done(void *cbk_ctx, u32 err)
3102 {
3103 	struct split_key_sh_result *res = cbk_ctx;
3104 
3105 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3106 
3107 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3108 	complete(&res->completion);
3109 }
3110 
3111 /* Digest hash size if it is too large */
3112 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3113 			   u32 digestsize)
3114 {
3115 	struct caam_request *req_ctx;
3116 	u32 *desc;
3117 	struct split_key_sh_result result;
3118 	dma_addr_t key_dma;
3119 	struct caam_flc *flc;
3120 	dma_addr_t flc_dma;
3121 	int ret = -ENOMEM;
3122 	struct dpaa2_fl_entry *in_fle, *out_fle;
3123 
3124 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3125 	if (!req_ctx)
3126 		return -ENOMEM;
3127 
3128 	in_fle = &req_ctx->fd_flt[1];
3129 	out_fle = &req_ctx->fd_flt[0];
3130 
3131 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3132 	if (!flc)
3133 		goto err_flc;
3134 
3135 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3136 	if (dma_mapping_error(ctx->dev, key_dma)) {
3137 		dev_err(ctx->dev, "unable to map key memory\n");
3138 		goto err_key_dma;
3139 	}
3140 
3141 	desc = flc->sh_desc;
3142 
3143 	init_sh_desc(desc, 0);
3144 
3145 	/* descriptor to perform unkeyed hash on key_in */
3146 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3147 			 OP_ALG_AS_INITFINAL);
3148 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3149 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3150 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3151 			 LDST_SRCDST_BYTE_CONTEXT);
3152 
3153 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3154 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3155 				 desc_bytes(desc), DMA_TO_DEVICE);
3156 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3157 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3158 		goto err_flc_dma;
3159 	}
3160 
3161 	dpaa2_fl_set_final(in_fle, true);
3162 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3163 	dpaa2_fl_set_addr(in_fle, key_dma);
3164 	dpaa2_fl_set_len(in_fle, *keylen);
3165 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3166 	dpaa2_fl_set_addr(out_fle, key_dma);
3167 	dpaa2_fl_set_len(out_fle, digestsize);
3168 
3169 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3170 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3171 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3172 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3173 			     1);
3174 
3175 	result.err = 0;
3176 	init_completion(&result.completion);
3177 	result.dev = ctx->dev;
3178 
3179 	req_ctx->flc = flc;
3180 	req_ctx->flc_dma = flc_dma;
3181 	req_ctx->cbk = split_key_sh_done;
3182 	req_ctx->ctx = &result;
3183 
3184 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3185 	if (ret == -EINPROGRESS) {
3186 		/* in progress */
3187 		wait_for_completion(&result.completion);
3188 		ret = result.err;
3189 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3190 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3191 				     digestsize, 1);
3192 	}
3193 
3194 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3195 			 DMA_TO_DEVICE);
3196 err_flc_dma:
3197 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3198 err_key_dma:
3199 	kfree(flc);
3200 err_flc:
3201 	kfree(req_ctx);
3202 
3203 	*keylen = digestsize;
3204 
3205 	return ret;
3206 }
3207 
3208 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3209 			unsigned int keylen)
3210 {
3211 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3212 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3213 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3214 	int ret;
3215 	u8 *hashed_key = NULL;
3216 
3217 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3218 
3219 	if (keylen > blocksize) {
3220 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3221 		if (!hashed_key)
3222 			return -ENOMEM;
3223 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3224 		if (ret)
3225 			goto bad_free_key;
3226 		key = hashed_key;
3227 	}
3228 
3229 	ctx->adata.keylen = keylen;
3230 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3231 					      OP_ALG_ALGSEL_MASK);
3232 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3233 		goto bad_free_key;
3234 
3235 	ctx->adata.key_virt = key;
3236 	ctx->adata.key_inline = true;
3237 
3238 	/*
3239 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3240 	 * in invalid opcodes (last bytes of user key) in the resulting
3241 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3242 	 * addresses are needed.
3243 	 */
3244 	if (keylen > ctx->adata.keylen_pad) {
3245 		memcpy(ctx->key, key, keylen);
3246 		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3247 					   ctx->adata.keylen_pad,
3248 					   DMA_TO_DEVICE);
3249 	}
3250 
3251 	ret = ahash_set_sh_desc(ahash);
3252 	kfree(hashed_key);
3253 	return ret;
3254 bad_free_key:
3255 	kfree(hashed_key);
3256 	return -EINVAL;
3257 }
3258 
3259 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3260 			       struct ahash_request *req)
3261 {
3262 	struct caam_hash_state *state = ahash_request_ctx(req);
3263 
3264 	if (edesc->src_nents)
3265 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3266 
3267 	if (edesc->qm_sg_bytes)
3268 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3269 				 DMA_TO_DEVICE);
3270 
3271 	if (state->buf_dma) {
3272 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3273 				 DMA_TO_DEVICE);
3274 		state->buf_dma = 0;
3275 	}
3276 }
3277 
3278 static inline void ahash_unmap_ctx(struct device *dev,
3279 				   struct ahash_edesc *edesc,
3280 				   struct ahash_request *req, u32 flag)
3281 {
3282 	struct caam_hash_state *state = ahash_request_ctx(req);
3283 
3284 	if (state->ctx_dma) {
3285 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3286 		state->ctx_dma = 0;
3287 	}
3288 	ahash_unmap(dev, edesc, req);
3289 }
3290 
3291 static void ahash_done(void *cbk_ctx, u32 status)
3292 {
3293 	struct crypto_async_request *areq = cbk_ctx;
3294 	struct ahash_request *req = ahash_request_cast(areq);
3295 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3296 	struct caam_hash_state *state = ahash_request_ctx(req);
3297 	struct ahash_edesc *edesc = state->caam_req.edesc;
3298 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3299 	int digestsize = crypto_ahash_digestsize(ahash);
3300 	int ecode = 0;
3301 
3302 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3303 
3304 	if (unlikely(status))
3305 		ecode = caam_qi2_strstatus(ctx->dev, status);
3306 
3307 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3308 	memcpy(req->result, state->caam_ctx, digestsize);
3309 	qi_cache_free(edesc);
3310 
3311 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3312 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3313 			     ctx->ctx_len, 1);
3314 
3315 	req->base.complete(&req->base, ecode);
3316 }
3317 
3318 static void ahash_done_bi(void *cbk_ctx, u32 status)
3319 {
3320 	struct crypto_async_request *areq = cbk_ctx;
3321 	struct ahash_request *req = ahash_request_cast(areq);
3322 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3323 	struct caam_hash_state *state = ahash_request_ctx(req);
3324 	struct ahash_edesc *edesc = state->caam_req.edesc;
3325 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3326 	int ecode = 0;
3327 
3328 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3329 
3330 	if (unlikely(status))
3331 		ecode = caam_qi2_strstatus(ctx->dev, status);
3332 
3333 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3334 	qi_cache_free(edesc);
3335 
3336 	scatterwalk_map_and_copy(state->buf, req->src,
3337 				 req->nbytes - state->next_buflen,
3338 				 state->next_buflen, 0);
3339 	state->buflen = state->next_buflen;
3340 
3341 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3342 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3343 			     state->buflen, 1);
3344 
3345 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3346 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3347 			     ctx->ctx_len, 1);
3348 	if (req->result)
3349 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3350 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3351 				     crypto_ahash_digestsize(ahash), 1);
3352 
3353 	req->base.complete(&req->base, ecode);
3354 }
3355 
3356 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3357 {
3358 	struct crypto_async_request *areq = cbk_ctx;
3359 	struct ahash_request *req = ahash_request_cast(areq);
3360 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3361 	struct caam_hash_state *state = ahash_request_ctx(req);
3362 	struct ahash_edesc *edesc = state->caam_req.edesc;
3363 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3364 	int digestsize = crypto_ahash_digestsize(ahash);
3365 	int ecode = 0;
3366 
3367 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3368 
3369 	if (unlikely(status))
3370 		ecode = caam_qi2_strstatus(ctx->dev, status);
3371 
3372 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3373 	memcpy(req->result, state->caam_ctx, digestsize);
3374 	qi_cache_free(edesc);
3375 
3376 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3377 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3378 			     ctx->ctx_len, 1);
3379 
3380 	req->base.complete(&req->base, ecode);
3381 }
3382 
3383 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3384 {
3385 	struct crypto_async_request *areq = cbk_ctx;
3386 	struct ahash_request *req = ahash_request_cast(areq);
3387 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3388 	struct caam_hash_state *state = ahash_request_ctx(req);
3389 	struct ahash_edesc *edesc = state->caam_req.edesc;
3390 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3391 	int ecode = 0;
3392 
3393 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3394 
3395 	if (unlikely(status))
3396 		ecode = caam_qi2_strstatus(ctx->dev, status);
3397 
3398 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3399 	qi_cache_free(edesc);
3400 
3401 	scatterwalk_map_and_copy(state->buf, req->src,
3402 				 req->nbytes - state->next_buflen,
3403 				 state->next_buflen, 0);
3404 	state->buflen = state->next_buflen;
3405 
3406 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3407 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3408 			     state->buflen, 1);
3409 
3410 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3411 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3412 			     ctx->ctx_len, 1);
3413 	if (req->result)
3414 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3415 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3416 				     crypto_ahash_digestsize(ahash), 1);
3417 
3418 	req->base.complete(&req->base, ecode);
3419 }
3420 
3421 static int ahash_update_ctx(struct ahash_request *req)
3422 {
3423 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3424 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3425 	struct caam_hash_state *state = ahash_request_ctx(req);
3426 	struct caam_request *req_ctx = &state->caam_req;
3427 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3428 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3429 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3430 		      GFP_KERNEL : GFP_ATOMIC;
3431 	u8 *buf = state->buf;
3432 	int *buflen = &state->buflen;
3433 	int *next_buflen = &state->next_buflen;
3434 	int in_len = *buflen + req->nbytes, to_hash;
3435 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3436 	struct ahash_edesc *edesc;
3437 	int ret = 0;
3438 
3439 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3440 	to_hash = in_len - *next_buflen;
3441 
3442 	if (to_hash) {
3443 		struct dpaa2_sg_entry *sg_table;
3444 		int src_len = req->nbytes - *next_buflen;
3445 
3446 		src_nents = sg_nents_for_len(req->src, src_len);
3447 		if (src_nents < 0) {
3448 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3449 			return src_nents;
3450 		}
3451 
3452 		if (src_nents) {
3453 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3454 						  DMA_TO_DEVICE);
3455 			if (!mapped_nents) {
3456 				dev_err(ctx->dev, "unable to DMA map source\n");
3457 				return -ENOMEM;
3458 			}
3459 		} else {
3460 			mapped_nents = 0;
3461 		}
3462 
3463 		/* allocate space for base edesc and link tables */
3464 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3465 		if (!edesc) {
3466 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3467 				     DMA_TO_DEVICE);
3468 			return -ENOMEM;
3469 		}
3470 
3471 		edesc->src_nents = src_nents;
3472 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3473 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3474 			      sizeof(*sg_table);
3475 		sg_table = &edesc->sgt[0];
3476 
3477 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3478 				       DMA_BIDIRECTIONAL);
3479 		if (ret)
3480 			goto unmap_ctx;
3481 
3482 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3483 		if (ret)
3484 			goto unmap_ctx;
3485 
3486 		if (mapped_nents) {
3487 			sg_to_qm_sg_last(req->src, src_len,
3488 					 sg_table + qm_sg_src_index, 0);
3489 		} else {
3490 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3491 					   true);
3492 		}
3493 
3494 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3495 						  qm_sg_bytes, DMA_TO_DEVICE);
3496 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3497 			dev_err(ctx->dev, "unable to map S/G table\n");
3498 			ret = -ENOMEM;
3499 			goto unmap_ctx;
3500 		}
3501 		edesc->qm_sg_bytes = qm_sg_bytes;
3502 
3503 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3504 		dpaa2_fl_set_final(in_fle, true);
3505 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3506 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3507 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3508 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3509 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3510 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3511 
3512 		req_ctx->flc = &ctx->flc[UPDATE];
3513 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3514 		req_ctx->cbk = ahash_done_bi;
3515 		req_ctx->ctx = &req->base;
3516 		req_ctx->edesc = edesc;
3517 
3518 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3519 		if (ret != -EINPROGRESS &&
3520 		    !(ret == -EBUSY &&
3521 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3522 			goto unmap_ctx;
3523 	} else if (*next_buflen) {
3524 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3525 					 req->nbytes, 0);
3526 		*buflen = *next_buflen;
3527 
3528 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3529 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3530 				     *buflen, 1);
3531 	}
3532 
3533 	return ret;
3534 unmap_ctx:
3535 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3536 	qi_cache_free(edesc);
3537 	return ret;
3538 }
3539 
3540 static int ahash_final_ctx(struct ahash_request *req)
3541 {
3542 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3543 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3544 	struct caam_hash_state *state = ahash_request_ctx(req);
3545 	struct caam_request *req_ctx = &state->caam_req;
3546 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3547 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3548 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3549 		      GFP_KERNEL : GFP_ATOMIC;
3550 	int buflen = state->buflen;
3551 	int qm_sg_bytes;
3552 	int digestsize = crypto_ahash_digestsize(ahash);
3553 	struct ahash_edesc *edesc;
3554 	struct dpaa2_sg_entry *sg_table;
3555 	int ret;
3556 
3557 	/* allocate space for base edesc and link tables */
3558 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3559 	if (!edesc)
3560 		return -ENOMEM;
3561 
3562 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3563 	sg_table = &edesc->sgt[0];
3564 
3565 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3566 			       DMA_BIDIRECTIONAL);
3567 	if (ret)
3568 		goto unmap_ctx;
3569 
3570 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3571 	if (ret)
3572 		goto unmap_ctx;
3573 
3574 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3575 
3576 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3577 					  DMA_TO_DEVICE);
3578 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3579 		dev_err(ctx->dev, "unable to map S/G table\n");
3580 		ret = -ENOMEM;
3581 		goto unmap_ctx;
3582 	}
3583 	edesc->qm_sg_bytes = qm_sg_bytes;
3584 
3585 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3586 	dpaa2_fl_set_final(in_fle, true);
3587 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3588 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3589 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3590 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3591 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3592 	dpaa2_fl_set_len(out_fle, digestsize);
3593 
3594 	req_ctx->flc = &ctx->flc[FINALIZE];
3595 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3596 	req_ctx->cbk = ahash_done_ctx_src;
3597 	req_ctx->ctx = &req->base;
3598 	req_ctx->edesc = edesc;
3599 
3600 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3601 	if (ret == -EINPROGRESS ||
3602 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3603 		return ret;
3604 
3605 unmap_ctx:
3606 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3607 	qi_cache_free(edesc);
3608 	return ret;
3609 }
3610 
3611 static int ahash_finup_ctx(struct ahash_request *req)
3612 {
3613 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3614 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3615 	struct caam_hash_state *state = ahash_request_ctx(req);
3616 	struct caam_request *req_ctx = &state->caam_req;
3617 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3618 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3619 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3620 		      GFP_KERNEL : GFP_ATOMIC;
3621 	int buflen = state->buflen;
3622 	int qm_sg_bytes, qm_sg_src_index;
3623 	int src_nents, mapped_nents;
3624 	int digestsize = crypto_ahash_digestsize(ahash);
3625 	struct ahash_edesc *edesc;
3626 	struct dpaa2_sg_entry *sg_table;
3627 	int ret;
3628 
3629 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3630 	if (src_nents < 0) {
3631 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3632 		return src_nents;
3633 	}
3634 
3635 	if (src_nents) {
3636 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3637 					  DMA_TO_DEVICE);
3638 		if (!mapped_nents) {
3639 			dev_err(ctx->dev, "unable to DMA map source\n");
3640 			return -ENOMEM;
3641 		}
3642 	} else {
3643 		mapped_nents = 0;
3644 	}
3645 
3646 	/* allocate space for base edesc and link tables */
3647 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3648 	if (!edesc) {
3649 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3650 		return -ENOMEM;
3651 	}
3652 
3653 	edesc->src_nents = src_nents;
3654 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3655 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3656 		      sizeof(*sg_table);
3657 	sg_table = &edesc->sgt[0];
3658 
3659 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3660 			       DMA_BIDIRECTIONAL);
3661 	if (ret)
3662 		goto unmap_ctx;
3663 
3664 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3665 	if (ret)
3666 		goto unmap_ctx;
3667 
3668 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3669 
3670 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3671 					  DMA_TO_DEVICE);
3672 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3673 		dev_err(ctx->dev, "unable to map S/G table\n");
3674 		ret = -ENOMEM;
3675 		goto unmap_ctx;
3676 	}
3677 	edesc->qm_sg_bytes = qm_sg_bytes;
3678 
3679 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3680 	dpaa2_fl_set_final(in_fle, true);
3681 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3682 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3683 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3684 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3685 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3686 	dpaa2_fl_set_len(out_fle, digestsize);
3687 
3688 	req_ctx->flc = &ctx->flc[FINALIZE];
3689 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3690 	req_ctx->cbk = ahash_done_ctx_src;
3691 	req_ctx->ctx = &req->base;
3692 	req_ctx->edesc = edesc;
3693 
3694 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3695 	if (ret == -EINPROGRESS ||
3696 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3697 		return ret;
3698 
3699 unmap_ctx:
3700 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3701 	qi_cache_free(edesc);
3702 	return ret;
3703 }
3704 
3705 static int ahash_digest(struct ahash_request *req)
3706 {
3707 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3708 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3709 	struct caam_hash_state *state = ahash_request_ctx(req);
3710 	struct caam_request *req_ctx = &state->caam_req;
3711 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3712 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3713 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3714 		      GFP_KERNEL : GFP_ATOMIC;
3715 	int digestsize = crypto_ahash_digestsize(ahash);
3716 	int src_nents, mapped_nents;
3717 	struct ahash_edesc *edesc;
3718 	int ret = -ENOMEM;
3719 
3720 	state->buf_dma = 0;
3721 
3722 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3723 	if (src_nents < 0) {
3724 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3725 		return src_nents;
3726 	}
3727 
3728 	if (src_nents) {
3729 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3730 					  DMA_TO_DEVICE);
3731 		if (!mapped_nents) {
3732 			dev_err(ctx->dev, "unable to map source for DMA\n");
3733 			return ret;
3734 		}
3735 	} else {
3736 		mapped_nents = 0;
3737 	}
3738 
3739 	/* allocate space for base edesc and link tables */
3740 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3741 	if (!edesc) {
3742 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3743 		return ret;
3744 	}
3745 
3746 	edesc->src_nents = src_nents;
3747 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3748 
3749 	if (mapped_nents > 1) {
3750 		int qm_sg_bytes;
3751 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3752 
3753 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3754 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3755 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3756 						  qm_sg_bytes, DMA_TO_DEVICE);
3757 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3758 			dev_err(ctx->dev, "unable to map S/G table\n");
3759 			goto unmap;
3760 		}
3761 		edesc->qm_sg_bytes = qm_sg_bytes;
3762 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3763 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3764 	} else {
3765 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3766 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3767 	}
3768 
3769 	state->ctx_dma_len = digestsize;
3770 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3771 					DMA_FROM_DEVICE);
3772 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3773 		dev_err(ctx->dev, "unable to map ctx\n");
3774 		state->ctx_dma = 0;
3775 		goto unmap;
3776 	}
3777 
3778 	dpaa2_fl_set_final(in_fle, true);
3779 	dpaa2_fl_set_len(in_fle, req->nbytes);
3780 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3781 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3782 	dpaa2_fl_set_len(out_fle, digestsize);
3783 
3784 	req_ctx->flc = &ctx->flc[DIGEST];
3785 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3786 	req_ctx->cbk = ahash_done;
3787 	req_ctx->ctx = &req->base;
3788 	req_ctx->edesc = edesc;
3789 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3790 	if (ret == -EINPROGRESS ||
3791 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3792 		return ret;
3793 
3794 unmap:
3795 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3796 	qi_cache_free(edesc);
3797 	return ret;
3798 }
3799 
3800 static int ahash_final_no_ctx(struct ahash_request *req)
3801 {
3802 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3803 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3804 	struct caam_hash_state *state = ahash_request_ctx(req);
3805 	struct caam_request *req_ctx = &state->caam_req;
3806 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3807 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3808 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3809 		      GFP_KERNEL : GFP_ATOMIC;
3810 	u8 *buf = state->buf;
3811 	int buflen = state->buflen;
3812 	int digestsize = crypto_ahash_digestsize(ahash);
3813 	struct ahash_edesc *edesc;
3814 	int ret = -ENOMEM;
3815 
3816 	/* allocate space for base edesc and link tables */
3817 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3818 	if (!edesc)
3819 		return ret;
3820 
3821 	if (buflen) {
3822 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3823 						DMA_TO_DEVICE);
3824 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3825 			dev_err(ctx->dev, "unable to map src\n");
3826 			goto unmap;
3827 		}
3828 	}
3829 
3830 	state->ctx_dma_len = digestsize;
3831 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3832 					DMA_FROM_DEVICE);
3833 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3834 		dev_err(ctx->dev, "unable to map ctx\n");
3835 		state->ctx_dma = 0;
3836 		goto unmap;
3837 	}
3838 
3839 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3840 	dpaa2_fl_set_final(in_fle, true);
3841 	/*
3842 	 * crypto engine requires the input entry to be present when
3843 	 * "frame list" FD is used.
3844 	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3845 	 * in_fle zeroized (except for "Final" flag) is the best option.
3846 	 */
3847 	if (buflen) {
3848 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3849 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3850 		dpaa2_fl_set_len(in_fle, buflen);
3851 	}
3852 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3853 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3854 	dpaa2_fl_set_len(out_fle, digestsize);
3855 
3856 	req_ctx->flc = &ctx->flc[DIGEST];
3857 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3858 	req_ctx->cbk = ahash_done;
3859 	req_ctx->ctx = &req->base;
3860 	req_ctx->edesc = edesc;
3861 
3862 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3863 	if (ret == -EINPROGRESS ||
3864 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3865 		return ret;
3866 
3867 unmap:
3868 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3869 	qi_cache_free(edesc);
3870 	return ret;
3871 }
3872 
3873 static int ahash_update_no_ctx(struct ahash_request *req)
3874 {
3875 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3876 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3877 	struct caam_hash_state *state = ahash_request_ctx(req);
3878 	struct caam_request *req_ctx = &state->caam_req;
3879 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3880 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3881 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3882 		      GFP_KERNEL : GFP_ATOMIC;
3883 	u8 *buf = state->buf;
3884 	int *buflen = &state->buflen;
3885 	int *next_buflen = &state->next_buflen;
3886 	int in_len = *buflen + req->nbytes, to_hash;
3887 	int qm_sg_bytes, src_nents, mapped_nents;
3888 	struct ahash_edesc *edesc;
3889 	int ret = 0;
3890 
3891 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3892 	to_hash = in_len - *next_buflen;
3893 
3894 	if (to_hash) {
3895 		struct dpaa2_sg_entry *sg_table;
3896 		int src_len = req->nbytes - *next_buflen;
3897 
3898 		src_nents = sg_nents_for_len(req->src, src_len);
3899 		if (src_nents < 0) {
3900 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3901 			return src_nents;
3902 		}
3903 
3904 		if (src_nents) {
3905 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3906 						  DMA_TO_DEVICE);
3907 			if (!mapped_nents) {
3908 				dev_err(ctx->dev, "unable to DMA map source\n");
3909 				return -ENOMEM;
3910 			}
3911 		} else {
3912 			mapped_nents = 0;
3913 		}
3914 
3915 		/* allocate space for base edesc and link tables */
3916 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3917 		if (!edesc) {
3918 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3919 				     DMA_TO_DEVICE);
3920 			return -ENOMEM;
3921 		}
3922 
3923 		edesc->src_nents = src_nents;
3924 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3925 			      sizeof(*sg_table);
3926 		sg_table = &edesc->sgt[0];
3927 
3928 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3929 		if (ret)
3930 			goto unmap_ctx;
3931 
3932 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3933 
3934 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3935 						  qm_sg_bytes, DMA_TO_DEVICE);
3936 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3937 			dev_err(ctx->dev, "unable to map S/G table\n");
3938 			ret = -ENOMEM;
3939 			goto unmap_ctx;
3940 		}
3941 		edesc->qm_sg_bytes = qm_sg_bytes;
3942 
3943 		state->ctx_dma_len = ctx->ctx_len;
3944 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3945 						ctx->ctx_len, DMA_FROM_DEVICE);
3946 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3947 			dev_err(ctx->dev, "unable to map ctx\n");
3948 			state->ctx_dma = 0;
3949 			ret = -ENOMEM;
3950 			goto unmap_ctx;
3951 		}
3952 
3953 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3954 		dpaa2_fl_set_final(in_fle, true);
3955 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3956 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3957 		dpaa2_fl_set_len(in_fle, to_hash);
3958 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3959 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3960 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3961 
3962 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3963 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3964 		req_ctx->cbk = ahash_done_ctx_dst;
3965 		req_ctx->ctx = &req->base;
3966 		req_ctx->edesc = edesc;
3967 
3968 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3969 		if (ret != -EINPROGRESS &&
3970 		    !(ret == -EBUSY &&
3971 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3972 			goto unmap_ctx;
3973 
3974 		state->update = ahash_update_ctx;
3975 		state->finup = ahash_finup_ctx;
3976 		state->final = ahash_final_ctx;
3977 	} else if (*next_buflen) {
3978 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3979 					 req->nbytes, 0);
3980 		*buflen = *next_buflen;
3981 
3982 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3983 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3984 				     *buflen, 1);
3985 	}
3986 
3987 	return ret;
3988 unmap_ctx:
3989 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3990 	qi_cache_free(edesc);
3991 	return ret;
3992 }
3993 
3994 static int ahash_finup_no_ctx(struct ahash_request *req)
3995 {
3996 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3997 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3998 	struct caam_hash_state *state = ahash_request_ctx(req);
3999 	struct caam_request *req_ctx = &state->caam_req;
4000 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4001 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4002 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4003 		      GFP_KERNEL : GFP_ATOMIC;
4004 	int buflen = state->buflen;
4005 	int qm_sg_bytes, src_nents, mapped_nents;
4006 	int digestsize = crypto_ahash_digestsize(ahash);
4007 	struct ahash_edesc *edesc;
4008 	struct dpaa2_sg_entry *sg_table;
4009 	int ret = -ENOMEM;
4010 
4011 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4012 	if (src_nents < 0) {
4013 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4014 		return src_nents;
4015 	}
4016 
4017 	if (src_nents) {
4018 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4019 					  DMA_TO_DEVICE);
4020 		if (!mapped_nents) {
4021 			dev_err(ctx->dev, "unable to DMA map source\n");
4022 			return ret;
4023 		}
4024 	} else {
4025 		mapped_nents = 0;
4026 	}
4027 
4028 	/* allocate space for base edesc and link tables */
4029 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4030 	if (!edesc) {
4031 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4032 		return ret;
4033 	}
4034 
4035 	edesc->src_nents = src_nents;
4036 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4037 	sg_table = &edesc->sgt[0];
4038 
4039 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4040 	if (ret)
4041 		goto unmap;
4042 
4043 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4044 
4045 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4046 					  DMA_TO_DEVICE);
4047 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4048 		dev_err(ctx->dev, "unable to map S/G table\n");
4049 		ret = -ENOMEM;
4050 		goto unmap;
4051 	}
4052 	edesc->qm_sg_bytes = qm_sg_bytes;
4053 
4054 	state->ctx_dma_len = digestsize;
4055 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4056 					DMA_FROM_DEVICE);
4057 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4058 		dev_err(ctx->dev, "unable to map ctx\n");
4059 		state->ctx_dma = 0;
4060 		ret = -ENOMEM;
4061 		goto unmap;
4062 	}
4063 
4064 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4065 	dpaa2_fl_set_final(in_fle, true);
4066 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4067 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4068 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4069 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4070 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4071 	dpaa2_fl_set_len(out_fle, digestsize);
4072 
4073 	req_ctx->flc = &ctx->flc[DIGEST];
4074 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4075 	req_ctx->cbk = ahash_done;
4076 	req_ctx->ctx = &req->base;
4077 	req_ctx->edesc = edesc;
4078 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4079 	if (ret != -EINPROGRESS &&
4080 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4081 		goto unmap;
4082 
4083 	return ret;
4084 unmap:
4085 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4086 	qi_cache_free(edesc);
4087 	return ret;
4088 }
4089 
4090 static int ahash_update_first(struct ahash_request *req)
4091 {
4092 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4093 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4094 	struct caam_hash_state *state = ahash_request_ctx(req);
4095 	struct caam_request *req_ctx = &state->caam_req;
4096 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4097 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4098 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4099 		      GFP_KERNEL : GFP_ATOMIC;
4100 	u8 *buf = state->buf;
4101 	int *buflen = &state->buflen;
4102 	int *next_buflen = &state->next_buflen;
4103 	int to_hash;
4104 	int src_nents, mapped_nents;
4105 	struct ahash_edesc *edesc;
4106 	int ret = 0;
4107 
4108 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4109 				      1);
4110 	to_hash = req->nbytes - *next_buflen;
4111 
4112 	if (to_hash) {
4113 		struct dpaa2_sg_entry *sg_table;
4114 		int src_len = req->nbytes - *next_buflen;
4115 
4116 		src_nents = sg_nents_for_len(req->src, src_len);
4117 		if (src_nents < 0) {
4118 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4119 			return src_nents;
4120 		}
4121 
4122 		if (src_nents) {
4123 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4124 						  DMA_TO_DEVICE);
4125 			if (!mapped_nents) {
4126 				dev_err(ctx->dev, "unable to map source for DMA\n");
4127 				return -ENOMEM;
4128 			}
4129 		} else {
4130 			mapped_nents = 0;
4131 		}
4132 
4133 		/* allocate space for base edesc and link tables */
4134 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4135 		if (!edesc) {
4136 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4137 				     DMA_TO_DEVICE);
4138 			return -ENOMEM;
4139 		}
4140 
4141 		edesc->src_nents = src_nents;
4142 		sg_table = &edesc->sgt[0];
4143 
4144 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4145 		dpaa2_fl_set_final(in_fle, true);
4146 		dpaa2_fl_set_len(in_fle, to_hash);
4147 
4148 		if (mapped_nents > 1) {
4149 			int qm_sg_bytes;
4150 
4151 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4152 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4153 				      sizeof(*sg_table);
4154 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4155 							  qm_sg_bytes,
4156 							  DMA_TO_DEVICE);
4157 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4158 				dev_err(ctx->dev, "unable to map S/G table\n");
4159 				ret = -ENOMEM;
4160 				goto unmap_ctx;
4161 			}
4162 			edesc->qm_sg_bytes = qm_sg_bytes;
4163 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4164 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4165 		} else {
4166 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4167 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4168 		}
4169 
4170 		state->ctx_dma_len = ctx->ctx_len;
4171 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4172 						ctx->ctx_len, DMA_FROM_DEVICE);
4173 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4174 			dev_err(ctx->dev, "unable to map ctx\n");
4175 			state->ctx_dma = 0;
4176 			ret = -ENOMEM;
4177 			goto unmap_ctx;
4178 		}
4179 
4180 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4181 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4182 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4183 
4184 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4185 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4186 		req_ctx->cbk = ahash_done_ctx_dst;
4187 		req_ctx->ctx = &req->base;
4188 		req_ctx->edesc = edesc;
4189 
4190 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4191 		if (ret != -EINPROGRESS &&
4192 		    !(ret == -EBUSY && req->base.flags &
4193 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4194 			goto unmap_ctx;
4195 
4196 		state->update = ahash_update_ctx;
4197 		state->finup = ahash_finup_ctx;
4198 		state->final = ahash_final_ctx;
4199 	} else if (*next_buflen) {
4200 		state->update = ahash_update_no_ctx;
4201 		state->finup = ahash_finup_no_ctx;
4202 		state->final = ahash_final_no_ctx;
4203 		scatterwalk_map_and_copy(buf, req->src, 0,
4204 					 req->nbytes, 0);
4205 		*buflen = *next_buflen;
4206 
4207 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4208 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4209 				     *buflen, 1);
4210 	}
4211 
4212 	return ret;
4213 unmap_ctx:
4214 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4215 	qi_cache_free(edesc);
4216 	return ret;
4217 }
4218 
4219 static int ahash_finup_first(struct ahash_request *req)
4220 {
4221 	return ahash_digest(req);
4222 }
4223 
4224 static int ahash_init(struct ahash_request *req)
4225 {
4226 	struct caam_hash_state *state = ahash_request_ctx(req);
4227 
4228 	state->update = ahash_update_first;
4229 	state->finup = ahash_finup_first;
4230 	state->final = ahash_final_no_ctx;
4231 
4232 	state->ctx_dma = 0;
4233 	state->ctx_dma_len = 0;
4234 	state->buf_dma = 0;
4235 	state->buflen = 0;
4236 	state->next_buflen = 0;
4237 
4238 	return 0;
4239 }
4240 
4241 static int ahash_update(struct ahash_request *req)
4242 {
4243 	struct caam_hash_state *state = ahash_request_ctx(req);
4244 
4245 	return state->update(req);
4246 }
4247 
4248 static int ahash_finup(struct ahash_request *req)
4249 {
4250 	struct caam_hash_state *state = ahash_request_ctx(req);
4251 
4252 	return state->finup(req);
4253 }
4254 
4255 static int ahash_final(struct ahash_request *req)
4256 {
4257 	struct caam_hash_state *state = ahash_request_ctx(req);
4258 
4259 	return state->final(req);
4260 }
4261 
4262 static int ahash_export(struct ahash_request *req, void *out)
4263 {
4264 	struct caam_hash_state *state = ahash_request_ctx(req);
4265 	struct caam_export_state *export = out;
4266 	u8 *buf = state->buf;
4267 	int len = state->buflen;
4268 
4269 	memcpy(export->buf, buf, len);
4270 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4271 	export->buflen = len;
4272 	export->update = state->update;
4273 	export->final = state->final;
4274 	export->finup = state->finup;
4275 
4276 	return 0;
4277 }
4278 
4279 static int ahash_import(struct ahash_request *req, const void *in)
4280 {
4281 	struct caam_hash_state *state = ahash_request_ctx(req);
4282 	const struct caam_export_state *export = in;
4283 
4284 	memset(state, 0, sizeof(*state));
4285 	memcpy(state->buf, export->buf, export->buflen);
4286 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4287 	state->buflen = export->buflen;
4288 	state->update = export->update;
4289 	state->final = export->final;
4290 	state->finup = export->finup;
4291 
4292 	return 0;
4293 }
4294 
4295 struct caam_hash_template {
4296 	char name[CRYPTO_MAX_ALG_NAME];
4297 	char driver_name[CRYPTO_MAX_ALG_NAME];
4298 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4299 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4300 	unsigned int blocksize;
4301 	struct ahash_alg template_ahash;
4302 	u32 alg_type;
4303 };
4304 
4305 /* ahash descriptors */
4306 static struct caam_hash_template driver_hash[] = {
4307 	{
4308 		.name = "sha1",
4309 		.driver_name = "sha1-caam-qi2",
4310 		.hmac_name = "hmac(sha1)",
4311 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4312 		.blocksize = SHA1_BLOCK_SIZE,
4313 		.template_ahash = {
4314 			.init = ahash_init,
4315 			.update = ahash_update,
4316 			.final = ahash_final,
4317 			.finup = ahash_finup,
4318 			.digest = ahash_digest,
4319 			.export = ahash_export,
4320 			.import = ahash_import,
4321 			.setkey = ahash_setkey,
4322 			.halg = {
4323 				.digestsize = SHA1_DIGEST_SIZE,
4324 				.statesize = sizeof(struct caam_export_state),
4325 			},
4326 		},
4327 		.alg_type = OP_ALG_ALGSEL_SHA1,
4328 	}, {
4329 		.name = "sha224",
4330 		.driver_name = "sha224-caam-qi2",
4331 		.hmac_name = "hmac(sha224)",
4332 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4333 		.blocksize = SHA224_BLOCK_SIZE,
4334 		.template_ahash = {
4335 			.init = ahash_init,
4336 			.update = ahash_update,
4337 			.final = ahash_final,
4338 			.finup = ahash_finup,
4339 			.digest = ahash_digest,
4340 			.export = ahash_export,
4341 			.import = ahash_import,
4342 			.setkey = ahash_setkey,
4343 			.halg = {
4344 				.digestsize = SHA224_DIGEST_SIZE,
4345 				.statesize = sizeof(struct caam_export_state),
4346 			},
4347 		},
4348 		.alg_type = OP_ALG_ALGSEL_SHA224,
4349 	}, {
4350 		.name = "sha256",
4351 		.driver_name = "sha256-caam-qi2",
4352 		.hmac_name = "hmac(sha256)",
4353 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4354 		.blocksize = SHA256_BLOCK_SIZE,
4355 		.template_ahash = {
4356 			.init = ahash_init,
4357 			.update = ahash_update,
4358 			.final = ahash_final,
4359 			.finup = ahash_finup,
4360 			.digest = ahash_digest,
4361 			.export = ahash_export,
4362 			.import = ahash_import,
4363 			.setkey = ahash_setkey,
4364 			.halg = {
4365 				.digestsize = SHA256_DIGEST_SIZE,
4366 				.statesize = sizeof(struct caam_export_state),
4367 			},
4368 		},
4369 		.alg_type = OP_ALG_ALGSEL_SHA256,
4370 	}, {
4371 		.name = "sha384",
4372 		.driver_name = "sha384-caam-qi2",
4373 		.hmac_name = "hmac(sha384)",
4374 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4375 		.blocksize = SHA384_BLOCK_SIZE,
4376 		.template_ahash = {
4377 			.init = ahash_init,
4378 			.update = ahash_update,
4379 			.final = ahash_final,
4380 			.finup = ahash_finup,
4381 			.digest = ahash_digest,
4382 			.export = ahash_export,
4383 			.import = ahash_import,
4384 			.setkey = ahash_setkey,
4385 			.halg = {
4386 				.digestsize = SHA384_DIGEST_SIZE,
4387 				.statesize = sizeof(struct caam_export_state),
4388 			},
4389 		},
4390 		.alg_type = OP_ALG_ALGSEL_SHA384,
4391 	}, {
4392 		.name = "sha512",
4393 		.driver_name = "sha512-caam-qi2",
4394 		.hmac_name = "hmac(sha512)",
4395 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4396 		.blocksize = SHA512_BLOCK_SIZE,
4397 		.template_ahash = {
4398 			.init = ahash_init,
4399 			.update = ahash_update,
4400 			.final = ahash_final,
4401 			.finup = ahash_finup,
4402 			.digest = ahash_digest,
4403 			.export = ahash_export,
4404 			.import = ahash_import,
4405 			.setkey = ahash_setkey,
4406 			.halg = {
4407 				.digestsize = SHA512_DIGEST_SIZE,
4408 				.statesize = sizeof(struct caam_export_state),
4409 			},
4410 		},
4411 		.alg_type = OP_ALG_ALGSEL_SHA512,
4412 	}, {
4413 		.name = "md5",
4414 		.driver_name = "md5-caam-qi2",
4415 		.hmac_name = "hmac(md5)",
4416 		.hmac_driver_name = "hmac-md5-caam-qi2",
4417 		.blocksize = MD5_BLOCK_WORDS * 4,
4418 		.template_ahash = {
4419 			.init = ahash_init,
4420 			.update = ahash_update,
4421 			.final = ahash_final,
4422 			.finup = ahash_finup,
4423 			.digest = ahash_digest,
4424 			.export = ahash_export,
4425 			.import = ahash_import,
4426 			.setkey = ahash_setkey,
4427 			.halg = {
4428 				.digestsize = MD5_DIGEST_SIZE,
4429 				.statesize = sizeof(struct caam_export_state),
4430 			},
4431 		},
4432 		.alg_type = OP_ALG_ALGSEL_MD5,
4433 	}
4434 };
4435 
4436 struct caam_hash_alg {
4437 	struct list_head entry;
4438 	struct device *dev;
4439 	int alg_type;
4440 	struct ahash_alg ahash_alg;
4441 };
4442 
4443 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4444 {
4445 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4446 	struct crypto_alg *base = tfm->__crt_alg;
4447 	struct hash_alg_common *halg =
4448 		 container_of(base, struct hash_alg_common, base);
4449 	struct ahash_alg *alg =
4450 		 container_of(halg, struct ahash_alg, halg);
4451 	struct caam_hash_alg *caam_hash =
4452 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4453 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4454 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4455 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4456 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4457 					 HASH_MSG_LEN + 32,
4458 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4459 					 HASH_MSG_LEN + 64,
4460 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4461 	dma_addr_t dma_addr;
4462 	int i;
4463 
4464 	ctx->dev = caam_hash->dev;
4465 
4466 	if (alg->setkey) {
4467 		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4468 							  ARRAY_SIZE(ctx->key),
4469 							  DMA_TO_DEVICE,
4470 							  DMA_ATTR_SKIP_CPU_SYNC);
4471 		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4472 			dev_err(ctx->dev, "unable to map key\n");
4473 			return -ENOMEM;
4474 		}
4475 	}
4476 
4477 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4478 					DMA_BIDIRECTIONAL,
4479 					DMA_ATTR_SKIP_CPU_SYNC);
4480 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4481 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4482 		if (ctx->adata.key_dma)
4483 			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4484 					       ARRAY_SIZE(ctx->key),
4485 					       DMA_TO_DEVICE,
4486 					       DMA_ATTR_SKIP_CPU_SYNC);
4487 		return -ENOMEM;
4488 	}
4489 
4490 	for (i = 0; i < HASH_NUM_OP; i++)
4491 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4492 
4493 	/* copy descriptor header template value */
4494 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4495 
4496 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4497 				   OP_ALG_ALGSEL_SUBMASK) >>
4498 				  OP_ALG_ALGSEL_SHIFT];
4499 
4500 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4501 				 sizeof(struct caam_hash_state));
4502 
4503 	/*
4504 	 * For keyed hash algorithms shared descriptors
4505 	 * will be created later in setkey() callback
4506 	 */
4507 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4508 }
4509 
4510 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4511 {
4512 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4513 
4514 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4515 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4516 	if (ctx->adata.key_dma)
4517 		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4518 				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4519 				       DMA_ATTR_SKIP_CPU_SYNC);
4520 }
4521 
4522 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4523 	struct caam_hash_template *template, bool keyed)
4524 {
4525 	struct caam_hash_alg *t_alg;
4526 	struct ahash_alg *halg;
4527 	struct crypto_alg *alg;
4528 
4529 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4530 	if (!t_alg)
4531 		return ERR_PTR(-ENOMEM);
4532 
4533 	t_alg->ahash_alg = template->template_ahash;
4534 	halg = &t_alg->ahash_alg;
4535 	alg = &halg->halg.base;
4536 
4537 	if (keyed) {
4538 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4539 			 template->hmac_name);
4540 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4541 			 template->hmac_driver_name);
4542 	} else {
4543 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4544 			 template->name);
4545 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4546 			 template->driver_name);
4547 		t_alg->ahash_alg.setkey = NULL;
4548 	}
4549 	alg->cra_module = THIS_MODULE;
4550 	alg->cra_init = caam_hash_cra_init;
4551 	alg->cra_exit = caam_hash_cra_exit;
4552 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4553 	alg->cra_priority = CAAM_CRA_PRIORITY;
4554 	alg->cra_blocksize = template->blocksize;
4555 	alg->cra_alignmask = 0;
4556 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4557 
4558 	t_alg->alg_type = template->alg_type;
4559 	t_alg->dev = dev;
4560 
4561 	return t_alg;
4562 }
4563 
4564 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4565 {
4566 	struct dpaa2_caam_priv_per_cpu *ppriv;
4567 
4568 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4569 	napi_schedule_irqoff(&ppriv->napi);
4570 }
4571 
4572 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4573 {
4574 	struct device *dev = priv->dev;
4575 	struct dpaa2_io_notification_ctx *nctx;
4576 	struct dpaa2_caam_priv_per_cpu *ppriv;
4577 	int err, i = 0, cpu;
4578 
4579 	for_each_online_cpu(cpu) {
4580 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4581 		ppriv->priv = priv;
4582 		nctx = &ppriv->nctx;
4583 		nctx->is_cdan = 0;
4584 		nctx->id = ppriv->rsp_fqid;
4585 		nctx->desired_cpu = cpu;
4586 		nctx->cb = dpaa2_caam_fqdan_cb;
4587 
4588 		/* Register notification callbacks */
4589 		ppriv->dpio = dpaa2_io_service_select(cpu);
4590 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4591 		if (unlikely(err)) {
4592 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4593 			nctx->cb = NULL;
4594 			/*
4595 			 * If no affine DPIO for this core, there's probably
4596 			 * none available for next cores either. Signal we want
4597 			 * to retry later, in case the DPIO devices weren't
4598 			 * probed yet.
4599 			 */
4600 			err = -EPROBE_DEFER;
4601 			goto err;
4602 		}
4603 
4604 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4605 						     dev);
4606 		if (unlikely(!ppriv->store)) {
4607 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4608 			err = -ENOMEM;
4609 			goto err;
4610 		}
4611 
4612 		if (++i == priv->num_pairs)
4613 			break;
4614 	}
4615 
4616 	return 0;
4617 
4618 err:
4619 	for_each_online_cpu(cpu) {
4620 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4621 		if (!ppriv->nctx.cb)
4622 			break;
4623 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4624 	}
4625 
4626 	for_each_online_cpu(cpu) {
4627 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4628 		if (!ppriv->store)
4629 			break;
4630 		dpaa2_io_store_destroy(ppriv->store);
4631 	}
4632 
4633 	return err;
4634 }
4635 
4636 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4637 {
4638 	struct dpaa2_caam_priv_per_cpu *ppriv;
4639 	int i = 0, cpu;
4640 
4641 	for_each_online_cpu(cpu) {
4642 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4643 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4644 					    priv->dev);
4645 		dpaa2_io_store_destroy(ppriv->store);
4646 
4647 		if (++i == priv->num_pairs)
4648 			return;
4649 	}
4650 }
4651 
4652 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4653 {
4654 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4655 	struct device *dev = priv->dev;
4656 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4657 	struct dpaa2_caam_priv_per_cpu *ppriv;
4658 	int err = 0, i = 0, cpu;
4659 
4660 	/* Configure Rx queues */
4661 	for_each_online_cpu(cpu) {
4662 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4663 
4664 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4665 				       DPSECI_QUEUE_OPT_USER_CTX;
4666 		rx_queue_cfg.order_preservation_en = 0;
4667 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4668 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4669 		/*
4670 		 * Rx priority (WQ) doesn't really matter, since we use
4671 		 * pull mode, i.e. volatile dequeues from specific FQs
4672 		 */
4673 		rx_queue_cfg.dest_cfg.priority = 0;
4674 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4675 
4676 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4677 					  &rx_queue_cfg);
4678 		if (err) {
4679 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4680 				err);
4681 			return err;
4682 		}
4683 
4684 		if (++i == priv->num_pairs)
4685 			break;
4686 	}
4687 
4688 	return err;
4689 }
4690 
4691 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4692 {
4693 	struct device *dev = priv->dev;
4694 
4695 	if (!priv->cscn_mem)
4696 		return;
4697 
4698 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4699 	kfree(priv->cscn_mem);
4700 }
4701 
4702 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4703 {
4704 	struct device *dev = priv->dev;
4705 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4706 	int err;
4707 
4708 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4709 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4710 		if (err)
4711 			dev_err(dev, "dpseci_reset() failed\n");
4712 	}
4713 
4714 	dpaa2_dpseci_congestion_free(priv);
4715 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4716 }
4717 
4718 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4719 				  const struct dpaa2_fd *fd)
4720 {
4721 	struct caam_request *req;
4722 	u32 fd_err;
4723 
4724 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4725 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4726 		return;
4727 	}
4728 
4729 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4730 	if (unlikely(fd_err))
4731 		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4732 
4733 	/*
4734 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4735 	 * in FD[ERR] or FD[FRC].
4736 	 */
4737 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4738 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4739 			 DMA_BIDIRECTIONAL);
4740 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4741 }
4742 
4743 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4744 {
4745 	int err;
4746 
4747 	/* Retry while portal is busy */
4748 	do {
4749 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4750 					       ppriv->store);
4751 	} while (err == -EBUSY);
4752 
4753 	if (unlikely(err))
4754 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4755 
4756 	return err;
4757 }
4758 
4759 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4760 {
4761 	struct dpaa2_dq *dq;
4762 	int cleaned = 0, is_last;
4763 
4764 	do {
4765 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4766 		if (unlikely(!dq)) {
4767 			if (unlikely(!is_last)) {
4768 				dev_dbg(ppriv->priv->dev,
4769 					"FQ %d returned no valid frames\n",
4770 					ppriv->rsp_fqid);
4771 				/*
4772 				 * MUST retry until we get some sort of
4773 				 * valid response token (be it "empty dequeue"
4774 				 * or a valid frame).
4775 				 */
4776 				continue;
4777 			}
4778 			break;
4779 		}
4780 
4781 		/* Process FD */
4782 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4783 		cleaned++;
4784 	} while (!is_last);
4785 
4786 	return cleaned;
4787 }
4788 
4789 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4790 {
4791 	struct dpaa2_caam_priv_per_cpu *ppriv;
4792 	struct dpaa2_caam_priv *priv;
4793 	int err, cleaned = 0, store_cleaned;
4794 
4795 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4796 	priv = ppriv->priv;
4797 
4798 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4799 		return 0;
4800 
4801 	do {
4802 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4803 		cleaned += store_cleaned;
4804 
4805 		if (store_cleaned == 0 ||
4806 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4807 			break;
4808 
4809 		/* Try to dequeue some more */
4810 		err = dpaa2_caam_pull_fq(ppriv);
4811 		if (unlikely(err))
4812 			break;
4813 	} while (1);
4814 
4815 	if (cleaned < budget) {
4816 		napi_complete_done(napi, cleaned);
4817 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4818 		if (unlikely(err))
4819 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4820 				err);
4821 	}
4822 
4823 	return cleaned;
4824 }
4825 
4826 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4827 					 u16 token)
4828 {
4829 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4830 	struct device *dev = priv->dev;
4831 	int err;
4832 
4833 	/*
4834 	 * Congestion group feature supported starting with DPSECI API v5.1
4835 	 * and only when object has been created with this capability.
4836 	 */
4837 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4838 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4839 		return 0;
4840 
4841 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4842 				 GFP_KERNEL | GFP_DMA);
4843 	if (!priv->cscn_mem)
4844 		return -ENOMEM;
4845 
4846 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4847 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4848 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4849 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4850 		dev_err(dev, "Error mapping CSCN memory area\n");
4851 		err = -ENOMEM;
4852 		goto err_dma_map;
4853 	}
4854 
4855 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4856 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4857 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4858 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4859 	cong_notif_cfg.message_iova = priv->cscn_dma;
4860 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4861 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4862 					DPSECI_CGN_MODE_COHERENT_WRITE;
4863 
4864 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4865 						 &cong_notif_cfg);
4866 	if (err) {
4867 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4868 		goto err_set_cong;
4869 	}
4870 
4871 	return 0;
4872 
4873 err_set_cong:
4874 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4875 err_dma_map:
4876 	kfree(priv->cscn_mem);
4877 
4878 	return err;
4879 }
4880 
4881 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4882 {
4883 	struct device *dev = &ls_dev->dev;
4884 	struct dpaa2_caam_priv *priv;
4885 	struct dpaa2_caam_priv_per_cpu *ppriv;
4886 	int err, cpu;
4887 	u8 i;
4888 
4889 	priv = dev_get_drvdata(dev);
4890 
4891 	priv->dev = dev;
4892 	priv->dpsec_id = ls_dev->obj_desc.id;
4893 
4894 	/* Get a handle for the DPSECI this interface is associate with */
4895 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4896 	if (err) {
4897 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4898 		goto err_open;
4899 	}
4900 
4901 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4902 				     &priv->minor_ver);
4903 	if (err) {
4904 		dev_err(dev, "dpseci_get_api_version() failed\n");
4905 		goto err_get_vers;
4906 	}
4907 
4908 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4909 
4910 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4911 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4912 		if (err) {
4913 			dev_err(dev, "dpseci_reset() failed\n");
4914 			goto err_get_vers;
4915 		}
4916 	}
4917 
4918 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4919 				    &priv->dpseci_attr);
4920 	if (err) {
4921 		dev_err(dev, "dpseci_get_attributes() failed\n");
4922 		goto err_get_vers;
4923 	}
4924 
4925 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4926 				  &priv->sec_attr);
4927 	if (err) {
4928 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4929 		goto err_get_vers;
4930 	}
4931 
4932 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4933 	if (err) {
4934 		dev_err(dev, "setup_congestion() failed\n");
4935 		goto err_get_vers;
4936 	}
4937 
4938 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4939 			      priv->dpseci_attr.num_tx_queues);
4940 	if (priv->num_pairs > num_online_cpus()) {
4941 		dev_warn(dev, "%d queues won't be used\n",
4942 			 priv->num_pairs - num_online_cpus());
4943 		priv->num_pairs = num_online_cpus();
4944 	}
4945 
4946 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4947 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4948 					  &priv->rx_queue_attr[i]);
4949 		if (err) {
4950 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4951 			goto err_get_rx_queue;
4952 		}
4953 	}
4954 
4955 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4956 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4957 					  &priv->tx_queue_attr[i]);
4958 		if (err) {
4959 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4960 			goto err_get_rx_queue;
4961 		}
4962 	}
4963 
4964 	i = 0;
4965 	for_each_online_cpu(cpu) {
4966 		u8 j;
4967 
4968 		j = i % priv->num_pairs;
4969 
4970 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4971 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4972 
4973 		/*
4974 		 * Allow all cores to enqueue, while only some of them
4975 		 * will take part in dequeuing.
4976 		 */
4977 		if (++i > priv->num_pairs)
4978 			continue;
4979 
4980 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4981 		ppriv->prio = j;
4982 
4983 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4984 			priv->rx_queue_attr[j].fqid,
4985 			priv->tx_queue_attr[j].fqid);
4986 
4987 		ppriv->net_dev.dev = *dev;
4988 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4989 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4990 			       DPAA2_CAAM_NAPI_WEIGHT);
4991 	}
4992 
4993 	return 0;
4994 
4995 err_get_rx_queue:
4996 	dpaa2_dpseci_congestion_free(priv);
4997 err_get_vers:
4998 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4999 err_open:
5000 	return err;
5001 }
5002 
5003 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5004 {
5005 	struct device *dev = priv->dev;
5006 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5007 	struct dpaa2_caam_priv_per_cpu *ppriv;
5008 	int i;
5009 
5010 	for (i = 0; i < priv->num_pairs; i++) {
5011 		ppriv = per_cpu_ptr(priv->ppriv, i);
5012 		napi_enable(&ppriv->napi);
5013 	}
5014 
5015 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5016 }
5017 
5018 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5019 {
5020 	struct device *dev = priv->dev;
5021 	struct dpaa2_caam_priv_per_cpu *ppriv;
5022 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5023 	int i, err = 0, enabled;
5024 
5025 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5026 	if (err) {
5027 		dev_err(dev, "dpseci_disable() failed\n");
5028 		return err;
5029 	}
5030 
5031 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5032 	if (err) {
5033 		dev_err(dev, "dpseci_is_enabled() failed\n");
5034 		return err;
5035 	}
5036 
5037 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5038 
5039 	for (i = 0; i < priv->num_pairs; i++) {
5040 		ppriv = per_cpu_ptr(priv->ppriv, i);
5041 		napi_disable(&ppriv->napi);
5042 		netif_napi_del(&ppriv->napi);
5043 	}
5044 
5045 	return 0;
5046 }
5047 
5048 static struct list_head hash_list;
5049 
5050 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5051 {
5052 	struct device *dev;
5053 	struct dpaa2_caam_priv *priv;
5054 	int i, err = 0;
5055 	bool registered = false;
5056 
5057 	/*
5058 	 * There is no way to get CAAM endianness - there is no direct register
5059 	 * space access and MC f/w does not provide this attribute.
5060 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5061 	 * property.
5062 	 */
5063 	caam_little_end = true;
5064 
5065 	caam_imx = false;
5066 
5067 	dev = &dpseci_dev->dev;
5068 
5069 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5070 	if (!priv)
5071 		return -ENOMEM;
5072 
5073 	dev_set_drvdata(dev, priv);
5074 
5075 	priv->domain = iommu_get_domain_for_dev(dev);
5076 
5077 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5078 				     0, SLAB_CACHE_DMA, NULL);
5079 	if (!qi_cache) {
5080 		dev_err(dev, "Can't allocate SEC cache\n");
5081 		return -ENOMEM;
5082 	}
5083 
5084 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5085 	if (err) {
5086 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5087 		goto err_dma_mask;
5088 	}
5089 
5090 	/* Obtain a MC portal */
5091 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5092 	if (err) {
5093 		if (err == -ENXIO)
5094 			err = -EPROBE_DEFER;
5095 		else
5096 			dev_err(dev, "MC portal allocation failed\n");
5097 
5098 		goto err_dma_mask;
5099 	}
5100 
5101 	priv->ppriv = alloc_percpu(*priv->ppriv);
5102 	if (!priv->ppriv) {
5103 		dev_err(dev, "alloc_percpu() failed\n");
5104 		err = -ENOMEM;
5105 		goto err_alloc_ppriv;
5106 	}
5107 
5108 	/* DPSECI initialization */
5109 	err = dpaa2_dpseci_setup(dpseci_dev);
5110 	if (err) {
5111 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5112 		goto err_dpseci_setup;
5113 	}
5114 
5115 	/* DPIO */
5116 	err = dpaa2_dpseci_dpio_setup(priv);
5117 	if (err) {
5118 		if (err != -EPROBE_DEFER)
5119 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5120 		goto err_dpio_setup;
5121 	}
5122 
5123 	/* DPSECI binding to DPIO */
5124 	err = dpaa2_dpseci_bind(priv);
5125 	if (err) {
5126 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5127 		goto err_bind;
5128 	}
5129 
5130 	/* DPSECI enable */
5131 	err = dpaa2_dpseci_enable(priv);
5132 	if (err) {
5133 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5134 		goto err_bind;
5135 	}
5136 
5137 	dpaa2_dpseci_debugfs_init(priv);
5138 
5139 	/* register crypto algorithms the device supports */
5140 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5141 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5142 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5143 
5144 		/* Skip DES algorithms if not supported by device */
5145 		if (!priv->sec_attr.des_acc_num &&
5146 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5147 		     alg_sel == OP_ALG_ALGSEL_DES))
5148 			continue;
5149 
5150 		/* Skip AES algorithms if not supported by device */
5151 		if (!priv->sec_attr.aes_acc_num &&
5152 		    alg_sel == OP_ALG_ALGSEL_AES)
5153 			continue;
5154 
5155 		/* Skip CHACHA20 algorithms if not supported by device */
5156 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5157 		    !priv->sec_attr.ccha_acc_num)
5158 			continue;
5159 
5160 		t_alg->caam.dev = dev;
5161 		caam_skcipher_alg_init(t_alg);
5162 
5163 		err = crypto_register_skcipher(&t_alg->skcipher);
5164 		if (err) {
5165 			dev_warn(dev, "%s alg registration failed: %d\n",
5166 				 t_alg->skcipher.base.cra_driver_name, err);
5167 			continue;
5168 		}
5169 
5170 		t_alg->registered = true;
5171 		registered = true;
5172 	}
5173 
5174 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5175 		struct caam_aead_alg *t_alg = driver_aeads + i;
5176 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5177 				 OP_ALG_ALGSEL_MASK;
5178 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5179 				 OP_ALG_ALGSEL_MASK;
5180 
5181 		/* Skip DES algorithms if not supported by device */
5182 		if (!priv->sec_attr.des_acc_num &&
5183 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5184 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5185 			continue;
5186 
5187 		/* Skip AES algorithms if not supported by device */
5188 		if (!priv->sec_attr.aes_acc_num &&
5189 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5190 			continue;
5191 
5192 		/* Skip CHACHA20 algorithms if not supported by device */
5193 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5194 		    !priv->sec_attr.ccha_acc_num)
5195 			continue;
5196 
5197 		/* Skip POLY1305 algorithms if not supported by device */
5198 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5199 		    !priv->sec_attr.ptha_acc_num)
5200 			continue;
5201 
5202 		/*
5203 		 * Skip algorithms requiring message digests
5204 		 * if MD not supported by device.
5205 		 */
5206 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5207 		    !priv->sec_attr.md_acc_num)
5208 			continue;
5209 
5210 		t_alg->caam.dev = dev;
5211 		caam_aead_alg_init(t_alg);
5212 
5213 		err = crypto_register_aead(&t_alg->aead);
5214 		if (err) {
5215 			dev_warn(dev, "%s alg registration failed: %d\n",
5216 				 t_alg->aead.base.cra_driver_name, err);
5217 			continue;
5218 		}
5219 
5220 		t_alg->registered = true;
5221 		registered = true;
5222 	}
5223 	if (registered)
5224 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5225 
5226 	/* register hash algorithms the device supports */
5227 	INIT_LIST_HEAD(&hash_list);
5228 
5229 	/*
5230 	 * Skip registration of any hashing algorithms if MD block
5231 	 * is not present.
5232 	 */
5233 	if (!priv->sec_attr.md_acc_num)
5234 		return 0;
5235 
5236 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5237 		struct caam_hash_alg *t_alg;
5238 		struct caam_hash_template *alg = driver_hash + i;
5239 
5240 		/* register hmac version */
5241 		t_alg = caam_hash_alloc(dev, alg, true);
5242 		if (IS_ERR(t_alg)) {
5243 			err = PTR_ERR(t_alg);
5244 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5245 				 alg->hmac_driver_name, err);
5246 			continue;
5247 		}
5248 
5249 		err = crypto_register_ahash(&t_alg->ahash_alg);
5250 		if (err) {
5251 			dev_warn(dev, "%s alg registration failed: %d\n",
5252 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5253 				 err);
5254 			kfree(t_alg);
5255 		} else {
5256 			list_add_tail(&t_alg->entry, &hash_list);
5257 		}
5258 
5259 		/* register unkeyed version */
5260 		t_alg = caam_hash_alloc(dev, alg, false);
5261 		if (IS_ERR(t_alg)) {
5262 			err = PTR_ERR(t_alg);
5263 			dev_warn(dev, "%s alg allocation failed: %d\n",
5264 				 alg->driver_name, err);
5265 			continue;
5266 		}
5267 
5268 		err = crypto_register_ahash(&t_alg->ahash_alg);
5269 		if (err) {
5270 			dev_warn(dev, "%s alg registration failed: %d\n",
5271 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5272 				 err);
5273 			kfree(t_alg);
5274 		} else {
5275 			list_add_tail(&t_alg->entry, &hash_list);
5276 		}
5277 	}
5278 	if (!list_empty(&hash_list))
5279 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5280 
5281 	return err;
5282 
5283 err_bind:
5284 	dpaa2_dpseci_dpio_free(priv);
5285 err_dpio_setup:
5286 	dpaa2_dpseci_free(priv);
5287 err_dpseci_setup:
5288 	free_percpu(priv->ppriv);
5289 err_alloc_ppriv:
5290 	fsl_mc_portal_free(priv->mc_io);
5291 err_dma_mask:
5292 	kmem_cache_destroy(qi_cache);
5293 
5294 	return err;
5295 }
5296 
5297 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5298 {
5299 	struct device *dev;
5300 	struct dpaa2_caam_priv *priv;
5301 	int i;
5302 
5303 	dev = &ls_dev->dev;
5304 	priv = dev_get_drvdata(dev);
5305 
5306 	dpaa2_dpseci_debugfs_exit(priv);
5307 
5308 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5309 		struct caam_aead_alg *t_alg = driver_aeads + i;
5310 
5311 		if (t_alg->registered)
5312 			crypto_unregister_aead(&t_alg->aead);
5313 	}
5314 
5315 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5316 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5317 
5318 		if (t_alg->registered)
5319 			crypto_unregister_skcipher(&t_alg->skcipher);
5320 	}
5321 
5322 	if (hash_list.next) {
5323 		struct caam_hash_alg *t_hash_alg, *p;
5324 
5325 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5326 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5327 			list_del(&t_hash_alg->entry);
5328 			kfree(t_hash_alg);
5329 		}
5330 	}
5331 
5332 	dpaa2_dpseci_disable(priv);
5333 	dpaa2_dpseci_dpio_free(priv);
5334 	dpaa2_dpseci_free(priv);
5335 	free_percpu(priv->ppriv);
5336 	fsl_mc_portal_free(priv->mc_io);
5337 	kmem_cache_destroy(qi_cache);
5338 
5339 	return 0;
5340 }
5341 
5342 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5343 {
5344 	struct dpaa2_fd fd;
5345 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5346 	struct dpaa2_caam_priv_per_cpu *ppriv;
5347 	int err = 0, i;
5348 
5349 	if (IS_ERR(req))
5350 		return PTR_ERR(req);
5351 
5352 	if (priv->cscn_mem) {
5353 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5354 					DPAA2_CSCN_SIZE,
5355 					DMA_FROM_DEVICE);
5356 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5357 			dev_dbg_ratelimited(dev, "Dropping request\n");
5358 			return -EBUSY;
5359 		}
5360 	}
5361 
5362 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5363 
5364 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5365 					 DMA_BIDIRECTIONAL);
5366 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5367 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5368 		goto err_out;
5369 	}
5370 
5371 	memset(&fd, 0, sizeof(fd));
5372 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5373 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5374 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5375 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5376 
5377 	ppriv = this_cpu_ptr(priv->ppriv);
5378 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5379 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5380 						  &fd);
5381 		if (err != -EBUSY)
5382 			break;
5383 
5384 		cpu_relax();
5385 	}
5386 
5387 	if (unlikely(err)) {
5388 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5389 		goto err_out;
5390 	}
5391 
5392 	return -EINPROGRESS;
5393 
5394 err_out:
5395 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5396 			 DMA_BIDIRECTIONAL);
5397 	return -EIO;
5398 }
5399 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5400 
5401 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5402 	{
5403 		.vendor = FSL_MC_VENDOR_FREESCALE,
5404 		.obj_type = "dpseci",
5405 	},
5406 	{ .vendor = 0x0 }
5407 };
5408 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5409 
5410 static struct fsl_mc_driver dpaa2_caam_driver = {
5411 	.driver = {
5412 		.name		= KBUILD_MODNAME,
5413 		.owner		= THIS_MODULE,
5414 	},
5415 	.probe		= dpaa2_caam_probe,
5416 	.remove		= dpaa2_caam_remove,
5417 	.match_id_table = dpaa2_caam_match_id_table
5418 };
5419 
5420 MODULE_LICENSE("Dual BSD/GPL");
5421 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5422 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5423 
5424 module_fsl_mc_driver(dpaa2_caam_driver);
5425