1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include <linux/fsl/mc.h>
19 #include <soc/fsl/dpaa2-io.h>
20 #include <soc/fsl/dpaa2-fd.h>
21 
22 #define CAAM_CRA_PRIORITY	2000
23 
24 /* max key: AES_MAX_KEY_SIZE + RFC3686 nonce size + max split key size */
25 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 				 SHA512_DIGEST_SIZE * 2)
27 
28 /*
29  * This is a cache of buffers, from which the users of the CAAM QI driver
30  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
31  * NOTE: A more elegant solution would be to have some headroom in the frames
32  *       being processed. This could be added by the dpaa2-eth driver. However,
33  *       it would pose a problem for userspace applications, which cannot
34  *       know of this limitation. So for now, this will work.
35  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
36  */
37 static struct kmem_cache *qi_cache;
38 
39 struct caam_alg_entry {
40 	struct device *dev;
41 	int class1_alg_type;
42 	int class2_alg_type;
43 	bool rfc3686;
44 	bool geniv;
45 	bool nodkp;
46 };
47 
48 struct caam_aead_alg {
49 	struct aead_alg aead;
50 	struct caam_alg_entry caam;
51 	bool registered;
52 };
53 
54 struct caam_skcipher_alg {
55 	struct skcipher_alg skcipher;
56 	struct caam_alg_entry caam;
57 	bool registered;
58 };
59 
60 /**
61  * struct caam_ctx - per-session context
62  * @flc: Flow Contexts array
63  * @key: [authentication key], encryption key
64  * @flc_dma: I/O virtual addresses of the Flow Contexts
65  * @key_dma: I/O virtual address of the key
66  * @dir: DMA direction for mapping key and Flow Contexts
67  * @dev: dpseci device
68  * @adata: authentication algorithm details
69  * @cdata: encryption algorithm details
70  * @authsize: authentication tag (a.k.a. ICV / MAC) size
71  */
72 struct caam_ctx {
73 	struct caam_flc flc[NUM_OP];
74 	u8 key[CAAM_MAX_KEY_SIZE];
75 	dma_addr_t flc_dma[NUM_OP];
76 	dma_addr_t key_dma;
77 	enum dma_data_direction dir;
78 	struct device *dev;
79 	struct alginfo adata;
80 	struct alginfo cdata;
81 	unsigned int authsize;
82 };
83 
84 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
85 				     dma_addr_t iova_addr)
86 {
87 	phys_addr_t phys_addr;
88 
89 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
90 				   iova_addr;
91 
92 	return phys_to_virt(phys_addr);
93 }
94 
95 /*
96  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
97  *
98  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
99  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
100  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be large enough
101  * to hold 16 S/G entries.
102  *
103  * @flags - flags that would be used for the equivalent kzalloc(..) call
104  *
105  * Returns a pointer to a retrieved buffer on success or NULL on failure.
106  */
107 static inline void *qi_cache_zalloc(gfp_t flags)
108 {
109 	return kmem_cache_zalloc(qi_cache, flags);
110 }
111 
112 /*
113  * qi_cache_free - Free buffers allocated from the CAAM-QI cache
114  *
115  * @obj - buffer previously allocated by qi_cache_zalloc
116  *
117  * No checking is done; this is a simple pass-through to
118  * kmem_cache_free(...)
119  */
120 static inline void qi_cache_free(void *obj)
121 {
122 	kmem_cache_free(qi_cache, obj);
123 }
124 
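/*
 * to_caam_req - retrieve the caam_request embedded in a crypto async request
 *
 * The request context of skcipher, AEAD and ahash requests is a
 * struct caam_request; any other request type is rejected with -EINVAL.
 */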
125 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
126 {
127 	switch (crypto_tfm_alg_type(areq->tfm)) {
128 	case CRYPTO_ALG_TYPE_SKCIPHER:
129 		return skcipher_request_ctx(skcipher_request_cast(areq));
130 	case CRYPTO_ALG_TYPE_AEAD:
131 		return aead_request_ctx(container_of(areq, struct aead_request,
132 						     base));
133 	case CRYPTO_ALG_TYPE_AHASH:
134 		return ahash_request_ctx(ahash_request_cast(areq));
135 	default:
136 		return ERR_PTR(-EINVAL);
137 	}
138 }
139 
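/*
 * caam_unmap - DMA-unmap the resources of a crypto request
 *
 * Unmaps the source/destination scatterlists (bidirectionally when they
 * overlap), the IV buffer and the hardware S/G table, when present.
 */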
140 static void caam_unmap(struct device *dev, struct scatterlist *src,
141 		       struct scatterlist *dst, int src_nents,
142 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
143 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
144 		       int qm_sg_bytes)
145 {
146 	if (dst != src) {
147 		if (src_nents)
148 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
149 		if (dst_nents)
150 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
151 	} else {
152 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
153 	}
154 
155 	if (iv_dma)
156 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
157 
158 	if (qm_sg_bytes)
159 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
160 }
161 
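/*
 * aead_set_sh_desc - (re)build the AEAD encrypt/decrypt shared descriptors
 *
 * Constructs the shared descriptors in the ENCRYPT/DECRYPT flow contexts and
 * syncs them for device access. Keys are inlined into the descriptor when
 * they fit, otherwise they are referenced by DMA address.
 */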
162 static int aead_set_sh_desc(struct crypto_aead *aead)
163 {
164 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
165 						 typeof(*alg), aead);
166 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
167 	unsigned int ivsize = crypto_aead_ivsize(aead);
168 	struct device *dev = ctx->dev;
169 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
170 	struct caam_flc *flc;
171 	u32 *desc;
172 	u32 ctx1_iv_off = 0;
173 	u32 *nonce = NULL;
174 	unsigned int data_len[2];
175 	u32 inl_mask;
176 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
177 			       OP_ALG_AAI_CTR_MOD128);
178 	const bool is_rfc3686 = alg->caam.rfc3686;
179 
180 	if (!ctx->cdata.keylen || !ctx->authsize)
181 		return 0;
182 
183 	/*
184 	 * AES-CTR needs to load the IV in the CONTEXT1 register
185 	 * at an offset of 128 bits (16 bytes):
186 	 * CONTEXT1[255:128] = IV
187 	 */
188 	if (ctr_mode)
189 		ctx1_iv_off = 16;
190 
191 	/*
192 	 * RFC3686 specific:
193 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
194 	 */
195 	if (is_rfc3686) {
196 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
197 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
198 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
199 	}
200 
201 	data_len[0] = ctx->adata.keylen_pad;
202 	data_len[1] = ctx->cdata.keylen;
203 
204 	/* aead_encrypt shared descriptor */
205 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
206 						 DESC_QI_AEAD_ENC_LEN) +
207 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
208 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
209 			      ARRAY_SIZE(data_len)) < 0)
210 		return -EINVAL;
211 
212 	if (inl_mask & 1)
213 		ctx->adata.key_virt = ctx->key;
214 	else
215 		ctx->adata.key_dma = ctx->key_dma;
216 
217 	if (inl_mask & 2)
218 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
219 	else
220 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
221 
222 	ctx->adata.key_inline = !!(inl_mask & 1);
223 	ctx->cdata.key_inline = !!(inl_mask & 2);
224 
225 	flc = &ctx->flc[ENCRYPT];
226 	desc = flc->sh_desc;
227 
228 	if (alg->caam.geniv)
229 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
230 					  ivsize, ctx->authsize, is_rfc3686,
231 					  nonce, ctx1_iv_off, true,
232 					  priv->sec_attr.era);
233 	else
234 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
235 				       ivsize, ctx->authsize, is_rfc3686, nonce,
236 				       ctx1_iv_off, true, priv->sec_attr.era);
237 
238 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
239 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
240 				   sizeof(flc->flc) + desc_bytes(desc),
241 				   ctx->dir);
242 
243 	/* aead_decrypt shared descriptor */
244 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
245 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
246 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
247 			      ARRAY_SIZE(data_len)) < 0)
248 		return -EINVAL;
249 
250 	if (inl_mask & 1)
251 		ctx->adata.key_virt = ctx->key;
252 	else
253 		ctx->adata.key_dma = ctx->key_dma;
254 
255 	if (inl_mask & 2)
256 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
257 	else
258 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
259 
260 	ctx->adata.key_inline = !!(inl_mask & 1);
261 	ctx->cdata.key_inline = !!(inl_mask & 2);
262 
263 	flc = &ctx->flc[DECRYPT];
264 	desc = flc->sh_desc;
265 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
266 			       ivsize, ctx->authsize, alg->caam.geniv,
267 			       is_rfc3686, nonce, ctx1_iv_off, true,
268 			       priv->sec_attr.era);
269 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
270 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
271 				   sizeof(flc->flc) + desc_bytes(desc),
272 				   ctx->dir);
273 
274 	return 0;
275 }
276 
277 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
278 {
279 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
280 
281 	ctx->authsize = authsize;
282 	aead_set_sh_desc(authenc);
283 
284 	return 0;
285 }
286 
287 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
288 		       unsigned int keylen)
289 {
290 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
291 	struct device *dev = ctx->dev;
292 	struct crypto_authenc_keys keys;
293 
294 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
295 		goto badkey;
296 
297 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
298 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
299 		keys.authkeylen);
300 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
301 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
302 
303 	ctx->adata.keylen = keys.authkeylen;
304 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
305 					      OP_ALG_ALGSEL_MASK);
306 
307 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
308 		goto badkey;
309 
310 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
311 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
312 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
313 				   keys.enckeylen, ctx->dir);
314 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
315 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
316 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
317 
318 	ctx->cdata.keylen = keys.enckeylen;
319 
320 	memzero_explicit(&keys, sizeof(keys));
321 	return aead_set_sh_desc(aead);
322 badkey:
323 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
324 	memzero_explicit(&keys, sizeof(keys));
325 	return -EINVAL;
326 }
327 
328 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
329 			    unsigned int keylen)
330 {
331 	struct crypto_authenc_keys keys;
332 	u32 flags;
333 	int err;
334 
335 	err = crypto_authenc_extractkeys(&keys, key, keylen);
336 	if (unlikely(err))
337 		goto badkey;
338 
339 	err = -EINVAL;
340 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
341 		goto badkey;
342 
343 	flags = crypto_aead_get_flags(aead);
344 	err = __des3_verify_key(&flags, keys.enckey);
345 	if (unlikely(err)) {
346 		crypto_aead_set_flags(aead, flags);
347 		goto out;
348 	}
349 
350 	err = aead_setkey(aead, key, keylen);
351 
352 out:
353 	memzero_explicit(&keys, sizeof(keys));
354 	return err;
355 
356 badkey:
357 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
358 	goto out;
359 }
360 
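/*
 * aead_edesc_alloc - allocate and map the extended descriptor for an AEAD
 * request
 *
 * DMA-maps req->src/req->dst, the IV (if any) and the assoclen, builds the
 * hardware S/G table and fills in the input/output frame list entries.
 */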
361 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
362 					   bool encrypt)
363 {
364 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
365 	struct caam_request *req_ctx = aead_request_ctx(req);
366 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
367 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
368 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
369 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
370 						 typeof(*alg), aead);
371 	struct device *dev = ctx->dev;
372 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
373 		      GFP_KERNEL : GFP_ATOMIC;
374 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
375 	int src_len, dst_len = 0;
376 	struct aead_edesc *edesc;
377 	dma_addr_t qm_sg_dma, iv_dma = 0;
378 	int ivsize = 0;
379 	unsigned int authsize = ctx->authsize;
380 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
381 	int in_len, out_len;
382 	struct dpaa2_sg_entry *sg_table;
383 
384 	/* allocate space for base edesc, link tables and IV */
385 	edesc = qi_cache_zalloc(GFP_DMA | flags);
386 	if (unlikely(!edesc)) {
387 		dev_err(dev, "could not allocate extended descriptor\n");
388 		return ERR_PTR(-ENOMEM);
389 	}
390 
391 	if (unlikely(req->dst != req->src)) {
392 		src_len = req->assoclen + req->cryptlen;
393 		dst_len = src_len + (encrypt ? authsize : (-authsize));
394 
395 		src_nents = sg_nents_for_len(req->src, src_len);
396 		if (unlikely(src_nents < 0)) {
397 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
398 				src_len);
399 			qi_cache_free(edesc);
400 			return ERR_PTR(src_nents);
401 		}
402 
403 		dst_nents = sg_nents_for_len(req->dst, dst_len);
404 		if (unlikely(dst_nents < 0)) {
405 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
406 				dst_len);
407 			qi_cache_free(edesc);
408 			return ERR_PTR(dst_nents);
409 		}
410 
411 		if (src_nents) {
412 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
413 						      DMA_TO_DEVICE);
414 			if (unlikely(!mapped_src_nents)) {
415 				dev_err(dev, "unable to map source\n");
416 				qi_cache_free(edesc);
417 				return ERR_PTR(-ENOMEM);
418 			}
419 		} else {
420 			mapped_src_nents = 0;
421 		}
422 
423 		if (dst_nents) {
424 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
425 						      DMA_FROM_DEVICE);
426 			if (unlikely(!mapped_dst_nents)) {
427 				dev_err(dev, "unable to map destination\n");
428 				dma_unmap_sg(dev, req->src, src_nents,
429 					     DMA_TO_DEVICE);
430 				qi_cache_free(edesc);
431 				return ERR_PTR(-ENOMEM);
432 			}
433 		} else {
434 			mapped_dst_nents = 0;
435 		}
436 	} else {
437 		src_len = req->assoclen + req->cryptlen +
438 			  (encrypt ? authsize : 0);
439 
440 		src_nents = sg_nents_for_len(req->src, src_len);
441 		if (unlikely(src_nents < 0)) {
442 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
443 				src_len);
444 			qi_cache_free(edesc);
445 			return ERR_PTR(src_nents);
446 		}
447 
448 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
449 					      DMA_BIDIRECTIONAL);
450 		if (unlikely(!mapped_src_nents)) {
451 			dev_err(dev, "unable to map source\n");
452 			qi_cache_free(edesc);
453 			return ERR_PTR(-ENOMEM);
454 		}
455 	}
456 
457 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
458 		ivsize = crypto_aead_ivsize(aead);
459 
460 	/*
461 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
462 	 * Input is not contiguous.
463 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
464 	 * the end of the table by allocating more S/G entries. Logic:
465 	 * if (src != dst && output S/G)
466 	 *      pad output S/G, if needed
467 	 * else if (src == dst && S/G)
468 	 *      overlapping S/Gs; pad one of them
469 	 * else if (input S/G) ...
470 	 *      pad input S/G, if needed
471 	 */
472 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
473 	if (mapped_dst_nents > 1)
474 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
475 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
476 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
477 				  1 + !!ivsize +
478 				  pad_sg_nents(mapped_src_nents));
479 	else
480 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
481 
482 	sg_table = &edesc->sgt[0];
483 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
484 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
485 		     CAAM_QI_MEMCACHE_SIZE)) {
486 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
487 			qm_sg_nents, ivsize);
488 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
489 			   0, DMA_NONE, 0, 0);
490 		qi_cache_free(edesc);
491 		return ERR_PTR(-ENOMEM);
492 	}
493 
494 	if (ivsize) {
495 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
496 
497 		/* Make sure IV is located in a DMAable area */
498 		memcpy(iv, req->iv, ivsize);
499 
500 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
501 		if (dma_mapping_error(dev, iv_dma)) {
502 			dev_err(dev, "unable to map IV\n");
503 			caam_unmap(dev, req->src, req->dst, src_nents,
504 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
505 			qi_cache_free(edesc);
506 			return ERR_PTR(-ENOMEM);
507 		}
508 	}
509 
510 	edesc->src_nents = src_nents;
511 	edesc->dst_nents = dst_nents;
512 	edesc->iv_dma = iv_dma;
513 
514 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
515 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
516 		/*
517 		 * The associated data comes already with the IV but we need
518 		 * to skip it when we authenticate or encrypt...
519 		 */
520 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
521 	else
522 		edesc->assoclen = cpu_to_caam32(req->assoclen);
523 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
524 					     DMA_TO_DEVICE);
525 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
526 		dev_err(dev, "unable to map assoclen\n");
527 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
528 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
529 		qi_cache_free(edesc);
530 		return ERR_PTR(-ENOMEM);
531 	}
532 
533 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
534 	qm_sg_index++;
535 	if (ivsize) {
536 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
537 		qm_sg_index++;
538 	}
539 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
540 	qm_sg_index += mapped_src_nents;
541 
542 	if (mapped_dst_nents > 1)
543 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
544 
545 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
546 	if (dma_mapping_error(dev, qm_sg_dma)) {
547 		dev_err(dev, "unable to map S/G table\n");
548 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
549 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
550 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
551 		qi_cache_free(edesc);
552 		return ERR_PTR(-ENOMEM);
553 	}
554 
555 	edesc->qm_sg_dma = qm_sg_dma;
556 	edesc->qm_sg_bytes = qm_sg_bytes;
557 
558 	out_len = req->assoclen + req->cryptlen +
559 		  (encrypt ? ctx->authsize : (-ctx->authsize));
560 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
561 
562 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
563 	dpaa2_fl_set_final(in_fle, true);
564 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
565 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
566 	dpaa2_fl_set_len(in_fle, in_len);
567 
568 	if (req->dst == req->src) {
569 		if (mapped_src_nents == 1) {
570 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
571 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
572 		} else {
573 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
574 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
575 					  (1 + !!ivsize) * sizeof(*sg_table));
576 		}
577 	} else if (!mapped_dst_nents) {
578 		/*
579 		 * The crypto engine requires the output entry to be present
580 		 * when the "frame list" FD format is used.
581 		 * Since the engine does not support FMT=2'b11 (unused entry
582 		 * type), leaving out_fle zeroized is the best option.
583 		 */
584 		goto skip_out_fle;
585 	} else if (mapped_dst_nents == 1) {
586 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
587 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
588 	} else {
589 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
590 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
591 				  sizeof(*sg_table));
592 	}
593 
594 	dpaa2_fl_set_len(out_fle, out_len);
595 
596 skip_out_fle:
597 	return edesc;
598 }
599 
600 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
601 {
602 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
603 	unsigned int ivsize = crypto_aead_ivsize(aead);
604 	struct device *dev = ctx->dev;
605 	struct caam_flc *flc;
606 	u32 *desc;
607 
608 	if (!ctx->cdata.keylen || !ctx->authsize)
609 		return 0;
610 
611 	flc = &ctx->flc[ENCRYPT];
612 	desc = flc->sh_desc;
613 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
614 			       ctx->authsize, true, true);
615 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
616 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
617 				   sizeof(flc->flc) + desc_bytes(desc),
618 				   ctx->dir);
619 
620 	flc = &ctx->flc[DECRYPT];
621 	desc = flc->sh_desc;
622 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
623 			       ctx->authsize, false, true);
624 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
625 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
626 				   sizeof(flc->flc) + desc_bytes(desc),
627 				   ctx->dir);
628 
629 	return 0;
630 }
631 
632 static int chachapoly_setauthsize(struct crypto_aead *aead,
633 				  unsigned int authsize)
634 {
635 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
636 
637 	if (authsize != POLY1305_DIGEST_SIZE)
638 		return -EINVAL;
639 
640 	ctx->authsize = authsize;
641 	return chachapoly_set_sh_desc(aead);
642 }
643 
644 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
645 			     unsigned int keylen)
646 {
647 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
648 	unsigned int ivsize = crypto_aead_ivsize(aead);
649 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
650 
651 	if (keylen != CHACHA_KEY_SIZE + saltlen) {
652 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
653 		return -EINVAL;
654 	}
655 
656 	ctx->cdata.key_virt = key;
657 	ctx->cdata.keylen = keylen - saltlen;
658 
659 	return chachapoly_set_sh_desc(aead);
660 }
661 
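/*
 * gcm_set_sh_desc - (re)build the GCM encrypt/decrypt shared descriptors
 *
 * The key is inlined only when the job descriptor and the shared descriptor
 * together fit in the 64-word descriptor buffer.
 */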
662 static int gcm_set_sh_desc(struct crypto_aead *aead)
663 {
664 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
665 	struct device *dev = ctx->dev;
666 	unsigned int ivsize = crypto_aead_ivsize(aead);
667 	struct caam_flc *flc;
668 	u32 *desc;
669 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
670 			ctx->cdata.keylen;
671 
672 	if (!ctx->cdata.keylen || !ctx->authsize)
673 		return 0;
674 
675 	/*
676 	 * AES GCM encrypt shared descriptor
677 	 * Job Descriptor and Shared Descriptor
678 	 * must fit into the 64-word Descriptor h/w Buffer
679 	 */
680 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
681 		ctx->cdata.key_inline = true;
682 		ctx->cdata.key_virt = ctx->key;
683 	} else {
684 		ctx->cdata.key_inline = false;
685 		ctx->cdata.key_dma = ctx->key_dma;
686 	}
687 
688 	flc = &ctx->flc[ENCRYPT];
689 	desc = flc->sh_desc;
690 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
691 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
692 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
693 				   sizeof(flc->flc) + desc_bytes(desc),
694 				   ctx->dir);
695 
696 	/*
697 	 * Job Descriptor and Shared Descriptors
698 	 * must all fit into the 64-word Descriptor h/w Buffer
699 	 */
700 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
701 		ctx->cdata.key_inline = true;
702 		ctx->cdata.key_virt = ctx->key;
703 	} else {
704 		ctx->cdata.key_inline = false;
705 		ctx->cdata.key_dma = ctx->key_dma;
706 	}
707 
708 	flc = &ctx->flc[DECRYPT];
709 	desc = flc->sh_desc;
710 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
711 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
712 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
713 				   sizeof(flc->flc) + desc_bytes(desc),
714 				   ctx->dir);
715 
716 	return 0;
717 }
718 
719 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
720 {
721 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
722 
723 	ctx->authsize = authsize;
724 	gcm_set_sh_desc(authenc);
725 
726 	return 0;
727 }
728 
729 static int gcm_setkey(struct crypto_aead *aead,
730 		      const u8 *key, unsigned int keylen)
731 {
732 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
733 	struct device *dev = ctx->dev;
734 
735 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
736 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
737 
738 	memcpy(ctx->key, key, keylen);
739 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
740 	ctx->cdata.keylen = keylen;
741 
742 	return gcm_set_sh_desc(aead);
743 }
744 
745 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
746 {
747 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
748 	struct device *dev = ctx->dev;
749 	unsigned int ivsize = crypto_aead_ivsize(aead);
750 	struct caam_flc *flc;
751 	u32 *desc;
752 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
753 			ctx->cdata.keylen;
754 
755 	if (!ctx->cdata.keylen || !ctx->authsize)
756 		return 0;
757 
758 	ctx->cdata.key_virt = ctx->key;
759 
760 	/*
761 	 * RFC4106 encrypt shared descriptor
762 	 * Job Descriptor and Shared Descriptor
763 	 * must fit into the 64-word Descriptor h/w Buffer
764 	 */
765 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
766 		ctx->cdata.key_inline = true;
767 	} else {
768 		ctx->cdata.key_inline = false;
769 		ctx->cdata.key_dma = ctx->key_dma;
770 	}
771 
772 	flc = &ctx->flc[ENCRYPT];
773 	desc = flc->sh_desc;
774 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
775 				  true);
776 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
777 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
778 				   sizeof(flc->flc) + desc_bytes(desc),
779 				   ctx->dir);
780 
781 	/*
782 	 * Job Descriptor and Shared Descriptors
783 	 * must all fit into the 64-word Descriptor h/w Buffer
784 	 */
785 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
786 		ctx->cdata.key_inline = true;
787 	} else {
788 		ctx->cdata.key_inline = false;
789 		ctx->cdata.key_dma = ctx->key_dma;
790 	}
791 
792 	flc = &ctx->flc[DECRYPT];
793 	desc = flc->sh_desc;
794 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
795 				  true);
796 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
797 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
798 				   sizeof(flc->flc) + desc_bytes(desc),
799 				   ctx->dir);
800 
801 	return 0;
802 }
803 
804 static int rfc4106_setauthsize(struct crypto_aead *authenc,
805 			       unsigned int authsize)
806 {
807 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
808 
809 	ctx->authsize = authsize;
810 	rfc4106_set_sh_desc(authenc);
811 
812 	return 0;
813 }
814 
815 static int rfc4106_setkey(struct crypto_aead *aead,
816 			  const u8 *key, unsigned int keylen)
817 {
818 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
819 	struct device *dev = ctx->dev;
820 
821 	if (keylen < 4)
822 		return -EINVAL;
823 
824 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
825 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
826 
827 	memcpy(ctx->key, key, keylen);
828 	/*
829 	 * The last four bytes of the key material are used as the salt value
830 	 * in the nonce. Update the AES key length.
831 	 */
832 	ctx->cdata.keylen = keylen - 4;
833 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
834 				   ctx->dir);
835 
836 	return rfc4106_set_sh_desc(aead);
837 }
838 
839 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
840 {
841 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
842 	struct device *dev = ctx->dev;
843 	unsigned int ivsize = crypto_aead_ivsize(aead);
844 	struct caam_flc *flc;
845 	u32 *desc;
846 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
847 			ctx->cdata.keylen;
848 
849 	if (!ctx->cdata.keylen || !ctx->authsize)
850 		return 0;
851 
852 	ctx->cdata.key_virt = ctx->key;
853 
854 	/*
855 	 * RFC4543 encrypt shared descriptor
856 	 * Job Descriptor and Shared Descriptor
857 	 * must fit into the 64-word Descriptor h/w Buffer
858 	 */
859 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
860 		ctx->cdata.key_inline = true;
861 	} else {
862 		ctx->cdata.key_inline = false;
863 		ctx->cdata.key_dma = ctx->key_dma;
864 	}
865 
866 	flc = &ctx->flc[ENCRYPT];
867 	desc = flc->sh_desc;
868 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
869 				  true);
870 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
871 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
872 				   sizeof(flc->flc) + desc_bytes(desc),
873 				   ctx->dir);
874 
875 	/*
876 	 * Job Descriptor and Shared Descriptors
877 	 * must all fit into the 64-word Descriptor h/w Buffer
878 	 */
879 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
880 		ctx->cdata.key_inline = true;
881 	} else {
882 		ctx->cdata.key_inline = false;
883 		ctx->cdata.key_dma = ctx->key_dma;
884 	}
885 
886 	flc = &ctx->flc[DECRYPT];
887 	desc = flc->sh_desc;
888 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
889 				  true);
890 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
891 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
892 				   sizeof(flc->flc) + desc_bytes(desc),
893 				   ctx->dir);
894 
895 	return 0;
896 }
897 
898 static int rfc4543_setauthsize(struct crypto_aead *authenc,
899 			       unsigned int authsize)
900 {
901 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
902 
903 	ctx->authsize = authsize;
904 	rfc4543_set_sh_desc(authenc);
905 
906 	return 0;
907 }
908 
909 static int rfc4543_setkey(struct crypto_aead *aead,
910 			  const u8 *key, unsigned int keylen)
911 {
912 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
913 	struct device *dev = ctx->dev;
914 
915 	if (keylen < 4)
916 		return -EINVAL;
917 
918 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
919 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
920 
921 	memcpy(ctx->key, key, keylen);
922 	/*
923 	 * The last four bytes of the key material are used as the salt value
924 	 * in the nonce. Update the AES key length.
925 	 */
926 	ctx->cdata.keylen = keylen - 4;
927 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
928 				   ctx->dir);
929 
930 	return rfc4543_set_sh_desc(aead);
931 }
932 
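/*
 * skcipher_setkey - set the key and build the skcipher shared descriptors
 *
 * For CTR/RFC3686 modes the IV is loaded into CONTEXT1 at a non-zero offset;
 * the key is always inlined in the shared descriptors.
 */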
933 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
934 			   unsigned int keylen)
935 {
936 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
937 	struct caam_skcipher_alg *alg =
938 		container_of(crypto_skcipher_alg(skcipher),
939 			     struct caam_skcipher_alg, skcipher);
940 	struct device *dev = ctx->dev;
941 	struct caam_flc *flc;
942 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
943 	u32 *desc;
944 	u32 ctx1_iv_off = 0;
945 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
946 			       OP_ALG_AAI_CTR_MOD128) &&
947 			       ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
948 			       OP_ALG_ALGSEL_CHACHA20);
949 	const bool is_rfc3686 = alg->caam.rfc3686;
950 
951 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
952 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
953 
954 	/*
955 	 * AES-CTR needs to load the IV in the CONTEXT1 register
956 	 * at an offset of 128 bits (16 bytes):
957 	 * CONTEXT1[255:128] = IV
958 	 */
959 	if (ctr_mode)
960 		ctx1_iv_off = 16;
961 
962 	/*
963 	 * RFC3686 specific:
964 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
965 	 *	| *key = {KEY, NONCE}
966 	 */
967 	if (is_rfc3686) {
968 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
969 		keylen -= CTR_RFC3686_NONCE_SIZE;
970 	}
971 
972 	ctx->cdata.keylen = keylen;
973 	ctx->cdata.key_virt = key;
974 	ctx->cdata.key_inline = true;
975 
976 	/* skcipher_encrypt shared descriptor */
977 	flc = &ctx->flc[ENCRYPT];
978 	desc = flc->sh_desc;
979 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
980 				   ctx1_iv_off);
981 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
982 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
983 				   sizeof(flc->flc) + desc_bytes(desc),
984 				   ctx->dir);
985 
986 	/* skcipher_decrypt shared descriptor */
987 	flc = &ctx->flc[DECRYPT];
988 	desc = flc->sh_desc;
989 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
990 				   ctx1_iv_off);
991 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
992 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
993 				   sizeof(flc->flc) + desc_bytes(desc),
994 				   ctx->dir);
995 
996 	return 0;
997 }
998 
999 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1000 				const u8 *key, unsigned int keylen)
1001 {
1002 	return unlikely(des3_verify_key(skcipher, key)) ?:
1003 	       skcipher_setkey(skcipher, key, keylen);
1004 }
1005 
1006 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1007 			       unsigned int keylen)
1008 {
1009 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1010 	struct device *dev = ctx->dev;
1011 	struct caam_flc *flc;
1012 	u32 *desc;
1013 
1014 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1015 		dev_err(dev, "key size mismatch\n");
1016 		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1017 		return -EINVAL;
1018 	}
1019 
1020 	ctx->cdata.keylen = keylen;
1021 	ctx->cdata.key_virt = key;
1022 	ctx->cdata.key_inline = true;
1023 
1024 	/* xts_skcipher_encrypt shared descriptor */
1025 	flc = &ctx->flc[ENCRYPT];
1026 	desc = flc->sh_desc;
1027 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1028 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1029 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1030 				   sizeof(flc->flc) + desc_bytes(desc),
1031 				   ctx->dir);
1032 
1033 	/* xts_skcipher_decrypt shared descriptor */
1034 	flc = &ctx->flc[DECRYPT];
1035 	desc = flc->sh_desc;
1036 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1037 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1038 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1039 				   sizeof(flc->flc) + desc_bytes(desc),
1040 				   ctx->dir);
1041 
1042 	return 0;
1043 }
1044 
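/*
 * skcipher_edesc_alloc - allocate and map the extended descriptor for an
 * skcipher request
 *
 * The input HW S/G table is [IV, src] and the output one is [dst, IV]; both
 * IV entries point to the same DMA-able copy of req->iv.
 */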
1045 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1046 {
1047 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1048 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1049 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1050 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1051 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1052 	struct device *dev = ctx->dev;
1053 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1054 		       GFP_KERNEL : GFP_ATOMIC;
1055 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1056 	struct skcipher_edesc *edesc;
1057 	dma_addr_t iv_dma;
1058 	u8 *iv;
1059 	int ivsize = crypto_skcipher_ivsize(skcipher);
1060 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1061 	struct dpaa2_sg_entry *sg_table;
1062 
1063 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1064 	if (unlikely(src_nents < 0)) {
1065 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1066 			req->cryptlen);
1067 		return ERR_PTR(src_nents);
1068 	}
1069 
1070 	if (unlikely(req->dst != req->src)) {
1071 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1072 		if (unlikely(dst_nents < 0)) {
1073 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1074 				req->cryptlen);
1075 			return ERR_PTR(dst_nents);
1076 		}
1077 
1078 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1079 					      DMA_TO_DEVICE);
1080 		if (unlikely(!mapped_src_nents)) {
1081 			dev_err(dev, "unable to map source\n");
1082 			return ERR_PTR(-ENOMEM);
1083 		}
1084 
1085 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1086 					      DMA_FROM_DEVICE);
1087 		if (unlikely(!mapped_dst_nents)) {
1088 			dev_err(dev, "unable to map destination\n");
1089 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1090 			return ERR_PTR(-ENOMEM);
1091 		}
1092 	} else {
1093 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1094 					      DMA_BIDIRECTIONAL);
1095 		if (unlikely(!mapped_src_nents)) {
1096 			dev_err(dev, "unable to map source\n");
1097 			return ERR_PTR(-ENOMEM);
1098 		}
1099 	}
1100 
1101 	qm_sg_ents = 1 + mapped_src_nents;
1102 	dst_sg_idx = qm_sg_ents;
1103 
1104 	/*
1105 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1106 	 * IV entries point to the same buffer
1107 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1108 	 *
1109 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1110 	 * the end of the table by allocating more S/G entries.
1111 	 */
1112 	if (req->src != req->dst)
1113 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1114 	else
1115 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1116 
1117 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1118 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1119 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1120 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1121 			qm_sg_ents, ivsize);
1122 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1123 			   0, DMA_NONE, 0, 0);
1124 		return ERR_PTR(-ENOMEM);
1125 	}
1126 
1127 	/* allocate space for base edesc, link tables and IV */
1128 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1129 	if (unlikely(!edesc)) {
1130 		dev_err(dev, "could not allocate extended descriptor\n");
1131 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1132 			   0, DMA_NONE, 0, 0);
1133 		return ERR_PTR(-ENOMEM);
1134 	}
1135 
1136 	/* Make sure IV is located in a DMAable area */
1137 	sg_table = &edesc->sgt[0];
1138 	iv = (u8 *)(sg_table + qm_sg_ents);
1139 	memcpy(iv, req->iv, ivsize);
1140 
1141 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1142 	if (dma_mapping_error(dev, iv_dma)) {
1143 		dev_err(dev, "unable to map IV\n");
1144 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1145 			   0, DMA_NONE, 0, 0);
1146 		qi_cache_free(edesc);
1147 		return ERR_PTR(-ENOMEM);
1148 	}
1149 
1150 	edesc->src_nents = src_nents;
1151 	edesc->dst_nents = dst_nents;
1152 	edesc->iv_dma = iv_dma;
1153 	edesc->qm_sg_bytes = qm_sg_bytes;
1154 
1155 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1156 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1157 
1158 	if (req->src != req->dst)
1159 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1160 
1161 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1162 			 ivsize, 0);
1163 
1164 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1165 					  DMA_TO_DEVICE);
1166 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1167 		dev_err(dev, "unable to map S/G table\n");
1168 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1169 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1170 		qi_cache_free(edesc);
1171 		return ERR_PTR(-ENOMEM);
1172 	}
1173 
1174 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1175 	dpaa2_fl_set_final(in_fle, true);
1176 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1177 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1178 
1179 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1180 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1181 
1182 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1183 
1184 	if (req->src == req->dst)
1185 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1186 				  sizeof(*sg_table));
1187 	else
1188 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1189 				  sizeof(*sg_table));
1190 
1191 	return edesc;
1192 }
1193 
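/* DMA-unmap the resources mapped by aead_edesc_alloc() */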
1194 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1195 		       struct aead_request *req)
1196 {
1197 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1198 	int ivsize = crypto_aead_ivsize(aead);
1199 
1200 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1201 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1202 		   edesc->qm_sg_bytes);
1203 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1204 }
1205 
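/* DMA-unmap the resources mapped by skcipher_edesc_alloc() */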
1206 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1207 			   struct skcipher_request *req)
1208 {
1209 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1210 	int ivsize = crypto_skcipher_ivsize(skcipher);
1211 
1212 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1213 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1214 		   edesc->qm_sg_bytes);
1215 }
1216 
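/*
 * Completion callbacks: decode the hardware status, unmap the request
 * resources and complete the crypto API request.
 */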
1217 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1218 {
1219 	struct crypto_async_request *areq = cbk_ctx;
1220 	struct aead_request *req = container_of(areq, struct aead_request,
1221 						base);
1222 	struct caam_request *req_ctx = to_caam_req(areq);
1223 	struct aead_edesc *edesc = req_ctx->edesc;
1224 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1225 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1226 	int ecode = 0;
1227 
1228 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1229 
1230 	if (unlikely(status)) {
1231 		caam_qi2_strstatus(ctx->dev, status);
1232 		ecode = -EIO;
1233 	}
1234 
1235 	aead_unmap(ctx->dev, edesc, req);
1236 	qi_cache_free(edesc);
1237 	aead_request_complete(req, ecode);
1238 }
1239 
1240 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1241 {
1242 	struct crypto_async_request *areq = cbk_ctx;
1243 	struct aead_request *req = container_of(areq, struct aead_request,
1244 						base);
1245 	struct caam_request *req_ctx = to_caam_req(areq);
1246 	struct aead_edesc *edesc = req_ctx->edesc;
1247 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1248 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1249 	int ecode = 0;
1250 
1251 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1252 
1253 	if (unlikely(status)) {
1254 		caam_qi2_strstatus(ctx->dev, status);
1255 		/*
1256 		 * Return -EBADMSG if the hardware ICV (auth) check failed.
1257 		 */
1258 		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1259 		     JRSTA_CCBERR_ERRID_ICVCHK)
1260 			ecode = -EBADMSG;
1261 		else
1262 			ecode = -EIO;
1263 	}
1264 
1265 	aead_unmap(ctx->dev, edesc, req);
1266 	qi_cache_free(edesc);
1267 	aead_request_complete(req, ecode);
1268 }
1269 
1270 static int aead_encrypt(struct aead_request *req)
1271 {
1272 	struct aead_edesc *edesc;
1273 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1274 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1275 	struct caam_request *caam_req = aead_request_ctx(req);
1276 	int ret;
1277 
1278 	/* allocate extended descriptor */
1279 	edesc = aead_edesc_alloc(req, true);
1280 	if (IS_ERR(edesc))
1281 		return PTR_ERR(edesc);
1282 
1283 	caam_req->flc = &ctx->flc[ENCRYPT];
1284 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1285 	caam_req->cbk = aead_encrypt_done;
1286 	caam_req->ctx = &req->base;
1287 	caam_req->edesc = edesc;
1288 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1289 	if (ret != -EINPROGRESS &&
1290 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1291 		aead_unmap(ctx->dev, edesc, req);
1292 		qi_cache_free(edesc);
1293 	}
1294 
1295 	return ret;
1296 }
1297 
1298 static int aead_decrypt(struct aead_request *req)
1299 {
1300 	struct aead_edesc *edesc;
1301 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1302 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1303 	struct caam_request *caam_req = aead_request_ctx(req);
1304 	int ret;
1305 
1306 	/* allocate extended descriptor */
1307 	edesc = aead_edesc_alloc(req, false);
1308 	if (IS_ERR(edesc))
1309 		return PTR_ERR(edesc);
1310 
1311 	caam_req->flc = &ctx->flc[DECRYPT];
1312 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1313 	caam_req->cbk = aead_decrypt_done;
1314 	caam_req->ctx = &req->base;
1315 	caam_req->edesc = edesc;
1316 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1317 	if (ret != -EINPROGRESS &&
1318 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1319 		aead_unmap(ctx->dev, edesc, req);
1320 		qi_cache_free(edesc);
1321 	}
1322 
1323 	return ret;
1324 }
1325 
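/*
 * RFC4106/RFC4543 requests must carry at least the 8-byte ESP header
 * (SPI + sequence number) as associated data.
 */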
1326 static int ipsec_gcm_encrypt(struct aead_request *req)
1327 {
1328 	if (req->assoclen < 8)
1329 		return -EINVAL;
1330 
1331 	return aead_encrypt(req);
1332 }
1333 
1334 static int ipsec_gcm_decrypt(struct aead_request *req)
1335 {
1336 	if (req->assoclen < 8)
1337 		return -EINVAL;
1338 
1339 	return aead_decrypt(req);
1340 }
1341 
1342 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1343 {
1344 	struct crypto_async_request *areq = cbk_ctx;
1345 	struct skcipher_request *req = skcipher_request_cast(areq);
1346 	struct caam_request *req_ctx = to_caam_req(areq);
1347 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1348 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1349 	struct skcipher_edesc *edesc = req_ctx->edesc;
1350 	int ecode = 0;
1351 	int ivsize = crypto_skcipher_ivsize(skcipher);
1352 
1353 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1354 
1355 	if (unlikely(status)) {
1356 		caam_qi2_strstatus(ctx->dev, status);
1357 		ecode = -EIO;
1358 	}
1359 
1360 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1361 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1362 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1363 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1364 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1365 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1366 
1367 	skcipher_unmap(ctx->dev, edesc, req);
1368 
1369 	/*
1370 	 * The crypto API expects us to set the IV (req->iv) to the last
1371 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1372 	 * This is used e.g. by the CTS mode.
1373 	 */
1374 	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1375 
1376 	qi_cache_free(edesc);
1377 	skcipher_request_complete(req, ecode);
1378 }
1379 
1380 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1381 {
1382 	struct crypto_async_request *areq = cbk_ctx;
1383 	struct skcipher_request *req = skcipher_request_cast(areq);
1384 	struct caam_request *req_ctx = to_caam_req(areq);
1385 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1386 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1387 	struct skcipher_edesc *edesc = req_ctx->edesc;
1388 	int ecode = 0;
1389 	int ivsize = crypto_skcipher_ivsize(skcipher);
1390 
1391 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1392 
1393 	if (unlikely(status)) {
1394 		caam_qi2_strstatus(ctx->dev, status);
1395 		ecode = -EIO;
1396 	}
1397 
1398 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1399 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1400 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1401 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1402 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1403 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1404 
1405 	skcipher_unmap(ctx->dev, edesc, req);
1406 
1407 	/*
1408 	 * The crypto API expects us to set the IV (req->iv) to the last
1409 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1410 	 * This is used e.g. by the CTS mode.
1411 	 */
1412 	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1413 
1414 	qi_cache_free(edesc);
1415 	skcipher_request_complete(req, ecode);
1416 }
1417 
1418 static int skcipher_encrypt(struct skcipher_request *req)
1419 {
1420 	struct skcipher_edesc *edesc;
1421 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1422 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1423 	struct caam_request *caam_req = skcipher_request_ctx(req);
1424 	int ret;
1425 
1426 	/* allocate extended descriptor */
1427 	edesc = skcipher_edesc_alloc(req);
1428 	if (IS_ERR(edesc))
1429 		return PTR_ERR(edesc);
1430 
1431 	caam_req->flc = &ctx->flc[ENCRYPT];
1432 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1433 	caam_req->cbk = skcipher_encrypt_done;
1434 	caam_req->ctx = &req->base;
1435 	caam_req->edesc = edesc;
1436 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1437 	if (ret != -EINPROGRESS &&
1438 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1439 		skcipher_unmap(ctx->dev, edesc, req);
1440 		qi_cache_free(edesc);
1441 	}
1442 
1443 	return ret;
1444 }
1445 
1446 static int skcipher_decrypt(struct skcipher_request *req)
1447 {
1448 	struct skcipher_edesc *edesc;
1449 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1450 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1451 	struct caam_request *caam_req = skcipher_request_ctx(req);
1452 	int ret;
1453 
1454 	/* allocate extended descriptor */
1455 	edesc = skcipher_edesc_alloc(req);
1456 	if (IS_ERR(edesc))
1457 		return PTR_ERR(edesc);
1458 
1459 	caam_req->flc = &ctx->flc[DECRYPT];
1460 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1461 	caam_req->cbk = skcipher_decrypt_done;
1462 	caam_req->ctx = &req->base;
1463 	caam_req->edesc = edesc;
1464 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1465 	if (ret != -EINPROGRESS &&
1466 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1467 		skcipher_unmap(ctx->dev, edesc, req);
1468 		qi_cache_free(edesc);
1469 	}
1470 
1471 	return ret;
1472 }
1473 
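/*
 * caam_cra_init - common transform initialization
 *
 * Maps the flow contexts and the key buffer as one DMA region; the mapping
 * is bidirectional when split (DKP) keys are used, since the engine writes
 * the derived key material back into the key buffer.
 */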
1474 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1475 			 bool uses_dkp)
1476 {
1477 	dma_addr_t dma_addr;
1478 	int i;
1479 
1480 	/* copy descriptor header template value */
1481 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1482 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1483 
1484 	ctx->dev = caam->dev;
1485 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1486 
1487 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1488 					offsetof(struct caam_ctx, flc_dma),
1489 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1490 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1491 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1492 		return -ENOMEM;
1493 	}
1494 
1495 	for (i = 0; i < NUM_OP; i++)
1496 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1497 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1498 
1499 	return 0;
1500 }
1501 
1502 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1503 {
1504 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1505 	struct caam_skcipher_alg *caam_alg =
1506 		container_of(alg, typeof(*caam_alg), skcipher);
1507 
1508 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1509 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1510 }
1511 
1512 static int caam_cra_init_aead(struct crypto_aead *tfm)
1513 {
1514 	struct aead_alg *alg = crypto_aead_alg(tfm);
1515 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1516 						      aead);
1517 
1518 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1519 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1520 			     !caam_alg->caam.nodkp);
1521 }
1522 
1523 static void caam_exit_common(struct caam_ctx *ctx)
1524 {
1525 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1526 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1527 			       DMA_ATTR_SKIP_CPU_SYNC);
1528 }
1529 
1530 static void caam_cra_exit(struct crypto_skcipher *tfm)
1531 {
1532 	caam_exit_common(crypto_skcipher_ctx(tfm));
1533 }
1534 
1535 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1536 {
1537 	caam_exit_common(crypto_aead_ctx(tfm));
1538 }
1539 
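/* skcipher algorithms supported by the DPAA2-CAAM accelerator */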
1540 static struct caam_skcipher_alg driver_algs[] = {
1541 	{
1542 		.skcipher = {
1543 			.base = {
1544 				.cra_name = "cbc(aes)",
1545 				.cra_driver_name = "cbc-aes-caam-qi2",
1546 				.cra_blocksize = AES_BLOCK_SIZE,
1547 			},
1548 			.setkey = skcipher_setkey,
1549 			.encrypt = skcipher_encrypt,
1550 			.decrypt = skcipher_decrypt,
1551 			.min_keysize = AES_MIN_KEY_SIZE,
1552 			.max_keysize = AES_MAX_KEY_SIZE,
1553 			.ivsize = AES_BLOCK_SIZE,
1554 		},
1555 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1556 	},
1557 	{
1558 		.skcipher = {
1559 			.base = {
1560 				.cra_name = "cbc(des3_ede)",
1561 				.cra_driver_name = "cbc-3des-caam-qi2",
1562 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1563 			},
1564 			.setkey = des3_skcipher_setkey,
1565 			.encrypt = skcipher_encrypt,
1566 			.decrypt = skcipher_decrypt,
1567 			.min_keysize = DES3_EDE_KEY_SIZE,
1568 			.max_keysize = DES3_EDE_KEY_SIZE,
1569 			.ivsize = DES3_EDE_BLOCK_SIZE,
1570 		},
1571 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1572 	},
1573 	{
1574 		.skcipher = {
1575 			.base = {
1576 				.cra_name = "cbc(des)",
1577 				.cra_driver_name = "cbc-des-caam-qi2",
1578 				.cra_blocksize = DES_BLOCK_SIZE,
1579 			},
1580 			.setkey = skcipher_setkey,
1581 			.encrypt = skcipher_encrypt,
1582 			.decrypt = skcipher_decrypt,
1583 			.min_keysize = DES_KEY_SIZE,
1584 			.max_keysize = DES_KEY_SIZE,
1585 			.ivsize = DES_BLOCK_SIZE,
1586 		},
1587 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1588 	},
1589 	{
1590 		.skcipher = {
1591 			.base = {
1592 				.cra_name = "ctr(aes)",
1593 				.cra_driver_name = "ctr-aes-caam-qi2",
1594 				.cra_blocksize = 1,
1595 			},
1596 			.setkey = skcipher_setkey,
1597 			.encrypt = skcipher_encrypt,
1598 			.decrypt = skcipher_decrypt,
1599 			.min_keysize = AES_MIN_KEY_SIZE,
1600 			.max_keysize = AES_MAX_KEY_SIZE,
1601 			.ivsize = AES_BLOCK_SIZE,
1602 			.chunksize = AES_BLOCK_SIZE,
1603 		},
1604 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1605 					OP_ALG_AAI_CTR_MOD128,
1606 	},
1607 	{
1608 		.skcipher = {
1609 			.base = {
1610 				.cra_name = "rfc3686(ctr(aes))",
1611 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1612 				.cra_blocksize = 1,
1613 			},
1614 			.setkey = skcipher_setkey,
1615 			.encrypt = skcipher_encrypt,
1616 			.decrypt = skcipher_decrypt,
1617 			.min_keysize = AES_MIN_KEY_SIZE +
1618 				       CTR_RFC3686_NONCE_SIZE,
1619 			.max_keysize = AES_MAX_KEY_SIZE +
1620 				       CTR_RFC3686_NONCE_SIZE,
1621 			.ivsize = CTR_RFC3686_IV_SIZE,
1622 			.chunksize = AES_BLOCK_SIZE,
1623 		},
1624 		.caam = {
1625 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1626 					   OP_ALG_AAI_CTR_MOD128,
1627 			.rfc3686 = true,
1628 		},
1629 	},
1630 	{
1631 		.skcipher = {
1632 			.base = {
1633 				.cra_name = "xts(aes)",
1634 				.cra_driver_name = "xts-aes-caam-qi2",
1635 				.cra_blocksize = AES_BLOCK_SIZE,
1636 			},
1637 			.setkey = xts_skcipher_setkey,
1638 			.encrypt = skcipher_encrypt,
1639 			.decrypt = skcipher_decrypt,
1640 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1641 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1642 			.ivsize = AES_BLOCK_SIZE,
1643 		},
1644 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1645 	},
1646 	{
1647 		.skcipher = {
1648 			.base = {
1649 				.cra_name = "chacha20",
1650 				.cra_driver_name = "chacha20-caam-qi2",
1651 				.cra_blocksize = 1,
1652 			},
1653 			.setkey = skcipher_setkey,
1654 			.encrypt = skcipher_encrypt,
1655 			.decrypt = skcipher_decrypt,
1656 			.min_keysize = CHACHA_KEY_SIZE,
1657 			.max_keysize = CHACHA_KEY_SIZE,
1658 			.ivsize = CHACHA_IV_SIZE,
1659 		},
1660 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1661 	},
1662 };
1663 
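/* AEAD algorithms supported by the DPAA2-CAAM accelerator */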
1664 static struct caam_aead_alg driver_aeads[] = {
1665 	{
1666 		.aead = {
1667 			.base = {
1668 				.cra_name = "rfc4106(gcm(aes))",
1669 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1670 				.cra_blocksize = 1,
1671 			},
1672 			.setkey = rfc4106_setkey,
1673 			.setauthsize = rfc4106_setauthsize,
1674 			.encrypt = ipsec_gcm_encrypt,
1675 			.decrypt = ipsec_gcm_decrypt,
1676 			.ivsize = 8,
1677 			.maxauthsize = AES_BLOCK_SIZE,
1678 		},
1679 		.caam = {
1680 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1681 			.nodkp = true,
1682 		},
1683 	},
1684 	{
1685 		.aead = {
1686 			.base = {
1687 				.cra_name = "rfc4543(gcm(aes))",
1688 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1689 				.cra_blocksize = 1,
1690 			},
1691 			.setkey = rfc4543_setkey,
1692 			.setauthsize = rfc4543_setauthsize,
1693 			.encrypt = ipsec_gcm_encrypt,
1694 			.decrypt = ipsec_gcm_decrypt,
1695 			.ivsize = 8,
1696 			.maxauthsize = AES_BLOCK_SIZE,
1697 		},
1698 		.caam = {
1699 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1700 			.nodkp = true,
1701 		},
1702 	},
1703 	/* Galois Counter Mode */
1704 	{
1705 		.aead = {
1706 			.base = {
1707 				.cra_name = "gcm(aes)",
1708 				.cra_driver_name = "gcm-aes-caam-qi2",
1709 				.cra_blocksize = 1,
1710 			},
1711 			.setkey = gcm_setkey,
1712 			.setauthsize = gcm_setauthsize,
1713 			.encrypt = aead_encrypt,
1714 			.decrypt = aead_decrypt,
1715 			.ivsize = 12,
1716 			.maxauthsize = AES_BLOCK_SIZE,
1717 		},
1718 		.caam = {
1719 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1720 			.nodkp = true,
1721 		}
1722 	},
1723 	/* single-pass ipsec_esp descriptor */
1724 	{
1725 		.aead = {
1726 			.base = {
1727 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1728 				.cra_driver_name = "authenc-hmac-md5-"
1729 						   "cbc-aes-caam-qi2",
1730 				.cra_blocksize = AES_BLOCK_SIZE,
1731 			},
1732 			.setkey = aead_setkey,
1733 			.setauthsize = aead_setauthsize,
1734 			.encrypt = aead_encrypt,
1735 			.decrypt = aead_decrypt,
1736 			.ivsize = AES_BLOCK_SIZE,
1737 			.maxauthsize = MD5_DIGEST_SIZE,
1738 		},
1739 		.caam = {
1740 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1741 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1742 					   OP_ALG_AAI_HMAC_PRECOMP,
1743 		}
1744 	},
1745 	{
1746 		.aead = {
1747 			.base = {
1748 				.cra_name = "echainiv(authenc(hmac(md5),"
1749 					    "cbc(aes)))",
1750 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1751 						   "cbc-aes-caam-qi2",
1752 				.cra_blocksize = AES_BLOCK_SIZE,
1753 			},
1754 			.setkey = aead_setkey,
1755 			.setauthsize = aead_setauthsize,
1756 			.encrypt = aead_encrypt,
1757 			.decrypt = aead_decrypt,
1758 			.ivsize = AES_BLOCK_SIZE,
1759 			.maxauthsize = MD5_DIGEST_SIZE,
1760 		},
1761 		.caam = {
1762 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1763 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1764 					   OP_ALG_AAI_HMAC_PRECOMP,
1765 			.geniv = true,
1766 		}
1767 	},
1768 	{
1769 		.aead = {
1770 			.base = {
1771 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1772 				.cra_driver_name = "authenc-hmac-sha1-"
1773 						   "cbc-aes-caam-qi2",
1774 				.cra_blocksize = AES_BLOCK_SIZE,
1775 			},
1776 			.setkey = aead_setkey,
1777 			.setauthsize = aead_setauthsize,
1778 			.encrypt = aead_encrypt,
1779 			.decrypt = aead_decrypt,
1780 			.ivsize = AES_BLOCK_SIZE,
1781 			.maxauthsize = SHA1_DIGEST_SIZE,
1782 		},
1783 		.caam = {
1784 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1785 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1786 					   OP_ALG_AAI_HMAC_PRECOMP,
1787 		}
1788 	},
1789 	{
1790 		.aead = {
1791 			.base = {
1792 				.cra_name = "echainiv(authenc(hmac(sha1),"
1793 					    "cbc(aes)))",
1794 				.cra_driver_name = "echainiv-authenc-"
1795 						   "hmac-sha1-cbc-aes-caam-qi2",
1796 				.cra_blocksize = AES_BLOCK_SIZE,
1797 			},
1798 			.setkey = aead_setkey,
1799 			.setauthsize = aead_setauthsize,
1800 			.encrypt = aead_encrypt,
1801 			.decrypt = aead_decrypt,
1802 			.ivsize = AES_BLOCK_SIZE,
1803 			.maxauthsize = SHA1_DIGEST_SIZE,
1804 		},
1805 		.caam = {
1806 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1807 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1808 					   OP_ALG_AAI_HMAC_PRECOMP,
1809 			.geniv = true,
1810 		},
1811 	},
1812 	{
1813 		.aead = {
1814 			.base = {
1815 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1816 				.cra_driver_name = "authenc-hmac-sha224-"
1817 						   "cbc-aes-caam-qi2",
1818 				.cra_blocksize = AES_BLOCK_SIZE,
1819 			},
1820 			.setkey = aead_setkey,
1821 			.setauthsize = aead_setauthsize,
1822 			.encrypt = aead_encrypt,
1823 			.decrypt = aead_decrypt,
1824 			.ivsize = AES_BLOCK_SIZE,
1825 			.maxauthsize = SHA224_DIGEST_SIZE,
1826 		},
1827 		.caam = {
1828 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1829 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1830 					   OP_ALG_AAI_HMAC_PRECOMP,
1831 		}
1832 	},
1833 	{
1834 		.aead = {
1835 			.base = {
1836 				.cra_name = "echainiv(authenc(hmac(sha224),"
1837 					    "cbc(aes)))",
1838 				.cra_driver_name = "echainiv-authenc-"
1839 						   "hmac-sha224-cbc-aes-caam-qi2",
1840 				.cra_blocksize = AES_BLOCK_SIZE,
1841 			},
1842 			.setkey = aead_setkey,
1843 			.setauthsize = aead_setauthsize,
1844 			.encrypt = aead_encrypt,
1845 			.decrypt = aead_decrypt,
1846 			.ivsize = AES_BLOCK_SIZE,
1847 			.maxauthsize = SHA224_DIGEST_SIZE,
1848 		},
1849 		.caam = {
1850 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1851 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1852 					   OP_ALG_AAI_HMAC_PRECOMP,
1853 			.geniv = true,
1854 		}
1855 	},
1856 	{
1857 		.aead = {
1858 			.base = {
1859 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1860 				.cra_driver_name = "authenc-hmac-sha256-"
1861 						   "cbc-aes-caam-qi2",
1862 				.cra_blocksize = AES_BLOCK_SIZE,
1863 			},
1864 			.setkey = aead_setkey,
1865 			.setauthsize = aead_setauthsize,
1866 			.encrypt = aead_encrypt,
1867 			.decrypt = aead_decrypt,
1868 			.ivsize = AES_BLOCK_SIZE,
1869 			.maxauthsize = SHA256_DIGEST_SIZE,
1870 		},
1871 		.caam = {
1872 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1873 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1874 					   OP_ALG_AAI_HMAC_PRECOMP,
1875 		}
1876 	},
1877 	{
1878 		.aead = {
1879 			.base = {
1880 				.cra_name = "echainiv(authenc(hmac(sha256),"
1881 					    "cbc(aes)))",
1882 				.cra_driver_name = "echainiv-authenc-"
1883 						   "hmac-sha256-cbc-aes-"
1884 						   "caam-qi2",
1885 				.cra_blocksize = AES_BLOCK_SIZE,
1886 			},
1887 			.setkey = aead_setkey,
1888 			.setauthsize = aead_setauthsize,
1889 			.encrypt = aead_encrypt,
1890 			.decrypt = aead_decrypt,
1891 			.ivsize = AES_BLOCK_SIZE,
1892 			.maxauthsize = SHA256_DIGEST_SIZE,
1893 		},
1894 		.caam = {
1895 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1896 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1897 					   OP_ALG_AAI_HMAC_PRECOMP,
1898 			.geniv = true,
1899 		}
1900 	},
1901 	{
1902 		.aead = {
1903 			.base = {
1904 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1905 				.cra_driver_name = "authenc-hmac-sha384-"
1906 						   "cbc-aes-caam-qi2",
1907 				.cra_blocksize = AES_BLOCK_SIZE,
1908 			},
1909 			.setkey = aead_setkey,
1910 			.setauthsize = aead_setauthsize,
1911 			.encrypt = aead_encrypt,
1912 			.decrypt = aead_decrypt,
1913 			.ivsize = AES_BLOCK_SIZE,
1914 			.maxauthsize = SHA384_DIGEST_SIZE,
1915 		},
1916 		.caam = {
1917 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1918 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1919 					   OP_ALG_AAI_HMAC_PRECOMP,
1920 		}
1921 	},
1922 	{
1923 		.aead = {
1924 			.base = {
1925 				.cra_name = "echainiv(authenc(hmac(sha384),"
1926 					    "cbc(aes)))",
1927 				.cra_driver_name = "echainiv-authenc-"
1928 						   "hmac-sha384-cbc-aes-"
1929 						   "caam-qi2",
1930 				.cra_blocksize = AES_BLOCK_SIZE,
1931 			},
1932 			.setkey = aead_setkey,
1933 			.setauthsize = aead_setauthsize,
1934 			.encrypt = aead_encrypt,
1935 			.decrypt = aead_decrypt,
1936 			.ivsize = AES_BLOCK_SIZE,
1937 			.maxauthsize = SHA384_DIGEST_SIZE,
1938 		},
1939 		.caam = {
1940 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1941 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1942 					   OP_ALG_AAI_HMAC_PRECOMP,
1943 			.geniv = true,
1944 		}
1945 	},
1946 	{
1947 		.aead = {
1948 			.base = {
1949 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1950 				.cra_driver_name = "authenc-hmac-sha512-"
1951 						   "cbc-aes-caam-qi2",
1952 				.cra_blocksize = AES_BLOCK_SIZE,
1953 			},
1954 			.setkey = aead_setkey,
1955 			.setauthsize = aead_setauthsize,
1956 			.encrypt = aead_encrypt,
1957 			.decrypt = aead_decrypt,
1958 			.ivsize = AES_BLOCK_SIZE,
1959 			.maxauthsize = SHA512_DIGEST_SIZE,
1960 		},
1961 		.caam = {
1962 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1963 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1964 					   OP_ALG_AAI_HMAC_PRECOMP,
1965 		}
1966 	},
1967 	{
1968 		.aead = {
1969 			.base = {
1970 				.cra_name = "echainiv(authenc(hmac(sha512),"
1971 					    "cbc(aes)))",
1972 				.cra_driver_name = "echainiv-authenc-"
1973 						   "hmac-sha512-cbc-aes-"
1974 						   "caam-qi2",
1975 				.cra_blocksize = AES_BLOCK_SIZE,
1976 			},
1977 			.setkey = aead_setkey,
1978 			.setauthsize = aead_setauthsize,
1979 			.encrypt = aead_encrypt,
1980 			.decrypt = aead_decrypt,
1981 			.ivsize = AES_BLOCK_SIZE,
1982 			.maxauthsize = SHA512_DIGEST_SIZE,
1983 		},
1984 		.caam = {
1985 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1986 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1987 					   OP_ALG_AAI_HMAC_PRECOMP,
1988 			.geniv = true,
1989 		}
1990 	},
1991 	{
1992 		.aead = {
1993 			.base = {
1994 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1995 				.cra_driver_name = "authenc-hmac-md5-"
1996 						   "cbc-des3_ede-caam-qi2",
1997 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1998 			},
1999 			.setkey = des3_aead_setkey,
2000 			.setauthsize = aead_setauthsize,
2001 			.encrypt = aead_encrypt,
2002 			.decrypt = aead_decrypt,
2003 			.ivsize = DES3_EDE_BLOCK_SIZE,
2004 			.maxauthsize = MD5_DIGEST_SIZE,
2005 		},
2006 		.caam = {
2007 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2008 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2009 					   OP_ALG_AAI_HMAC_PRECOMP,
2010 		}
2011 	},
2012 	{
2013 		.aead = {
2014 			.base = {
2015 				.cra_name = "echainiv(authenc(hmac(md5),"
2016 					    "cbc(des3_ede)))",
2017 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2018 						   "cbc-des3_ede-caam-qi2",
2019 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2020 			},
2021 			.setkey = des3_aead_setkey,
2022 			.setauthsize = aead_setauthsize,
2023 			.encrypt = aead_encrypt,
2024 			.decrypt = aead_decrypt,
2025 			.ivsize = DES3_EDE_BLOCK_SIZE,
2026 			.maxauthsize = MD5_DIGEST_SIZE,
2027 		},
2028 		.caam = {
2029 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2030 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2031 					   OP_ALG_AAI_HMAC_PRECOMP,
2032 			.geniv = true,
2033 		}
2034 	},
2035 	{
2036 		.aead = {
2037 			.base = {
2038 				.cra_name = "authenc(hmac(sha1),"
2039 					    "cbc(des3_ede))",
2040 				.cra_driver_name = "authenc-hmac-sha1-"
2041 						   "cbc-des3_ede-caam-qi2",
2042 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2043 			},
2044 			.setkey = des3_aead_setkey,
2045 			.setauthsize = aead_setauthsize,
2046 			.encrypt = aead_encrypt,
2047 			.decrypt = aead_decrypt,
2048 			.ivsize = DES3_EDE_BLOCK_SIZE,
2049 			.maxauthsize = SHA1_DIGEST_SIZE,
2050 		},
2051 		.caam = {
2052 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2053 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2054 					   OP_ALG_AAI_HMAC_PRECOMP,
2055 		},
2056 	},
2057 	{
2058 		.aead = {
2059 			.base = {
2060 				.cra_name = "echainiv(authenc(hmac(sha1),"
2061 					    "cbc(des3_ede)))",
2062 				.cra_driver_name = "echainiv-authenc-"
2063 						   "hmac-sha1-"
2064 						   "cbc-des3_ede-caam-qi2",
2065 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2066 			},
2067 			.setkey = des3_aead_setkey,
2068 			.setauthsize = aead_setauthsize,
2069 			.encrypt = aead_encrypt,
2070 			.decrypt = aead_decrypt,
2071 			.ivsize = DES3_EDE_BLOCK_SIZE,
2072 			.maxauthsize = SHA1_DIGEST_SIZE,
2073 		},
2074 		.caam = {
2075 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2076 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2077 					   OP_ALG_AAI_HMAC_PRECOMP,
2078 			.geniv = true,
2079 		}
2080 	},
2081 	{
2082 		.aead = {
2083 			.base = {
2084 				.cra_name = "authenc(hmac(sha224),"
2085 					    "cbc(des3_ede))",
2086 				.cra_driver_name = "authenc-hmac-sha224-"
2087 						   "cbc-des3_ede-caam-qi2",
2088 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2089 			},
2090 			.setkey = des3_aead_setkey,
2091 			.setauthsize = aead_setauthsize,
2092 			.encrypt = aead_encrypt,
2093 			.decrypt = aead_decrypt,
2094 			.ivsize = DES3_EDE_BLOCK_SIZE,
2095 			.maxauthsize = SHA224_DIGEST_SIZE,
2096 		},
2097 		.caam = {
2098 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2099 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2100 					   OP_ALG_AAI_HMAC_PRECOMP,
2101 		},
2102 	},
2103 	{
2104 		.aead = {
2105 			.base = {
2106 				.cra_name = "echainiv(authenc(hmac(sha224),"
2107 					    "cbc(des3_ede)))",
2108 				.cra_driver_name = "echainiv-authenc-"
2109 						   "hmac-sha224-"
2110 						   "cbc-des3_ede-caam-qi2",
2111 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2112 			},
2113 			.setkey = des3_aead_setkey,
2114 			.setauthsize = aead_setauthsize,
2115 			.encrypt = aead_encrypt,
2116 			.decrypt = aead_decrypt,
2117 			.ivsize = DES3_EDE_BLOCK_SIZE,
2118 			.maxauthsize = SHA224_DIGEST_SIZE,
2119 		},
2120 		.caam = {
2121 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2122 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2123 					   OP_ALG_AAI_HMAC_PRECOMP,
2124 			.geniv = true,
2125 		}
2126 	},
2127 	{
2128 		.aead = {
2129 			.base = {
2130 				.cra_name = "authenc(hmac(sha256),"
2131 					    "cbc(des3_ede))",
2132 				.cra_driver_name = "authenc-hmac-sha256-"
2133 						   "cbc-des3_ede-caam-qi2",
2134 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2135 			},
2136 			.setkey = des3_aead_setkey,
2137 			.setauthsize = aead_setauthsize,
2138 			.encrypt = aead_encrypt,
2139 			.decrypt = aead_decrypt,
2140 			.ivsize = DES3_EDE_BLOCK_SIZE,
2141 			.maxauthsize = SHA256_DIGEST_SIZE,
2142 		},
2143 		.caam = {
2144 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2145 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2146 					   OP_ALG_AAI_HMAC_PRECOMP,
2147 		},
2148 	},
2149 	{
2150 		.aead = {
2151 			.base = {
2152 				.cra_name = "echainiv(authenc(hmac(sha256),"
2153 					    "cbc(des3_ede)))",
2154 				.cra_driver_name = "echainiv-authenc-"
2155 						   "hmac-sha256-"
2156 						   "cbc-des3_ede-caam-qi2",
2157 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2158 			},
2159 			.setkey = des3_aead_setkey,
2160 			.setauthsize = aead_setauthsize,
2161 			.encrypt = aead_encrypt,
2162 			.decrypt = aead_decrypt,
2163 			.ivsize = DES3_EDE_BLOCK_SIZE,
2164 			.maxauthsize = SHA256_DIGEST_SIZE,
2165 		},
2166 		.caam = {
2167 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2168 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2169 					   OP_ALG_AAI_HMAC_PRECOMP,
2170 			.geniv = true,
2171 		}
2172 	},
2173 	{
2174 		.aead = {
2175 			.base = {
2176 				.cra_name = "authenc(hmac(sha384),"
2177 					    "cbc(des3_ede))",
2178 				.cra_driver_name = "authenc-hmac-sha384-"
2179 						   "cbc-des3_ede-caam-qi2",
2180 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2181 			},
2182 			.setkey = des3_aead_setkey,
2183 			.setauthsize = aead_setauthsize,
2184 			.encrypt = aead_encrypt,
2185 			.decrypt = aead_decrypt,
2186 			.ivsize = DES3_EDE_BLOCK_SIZE,
2187 			.maxauthsize = SHA384_DIGEST_SIZE,
2188 		},
2189 		.caam = {
2190 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2191 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2192 					   OP_ALG_AAI_HMAC_PRECOMP,
2193 		},
2194 	},
2195 	{
2196 		.aead = {
2197 			.base = {
2198 				.cra_name = "echainiv(authenc(hmac(sha384),"
2199 					    "cbc(des3_ede)))",
2200 				.cra_driver_name = "echainiv-authenc-"
2201 						   "hmac-sha384-"
2202 						   "cbc-des3_ede-caam-qi2",
2203 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2204 			},
2205 			.setkey = des3_aead_setkey,
2206 			.setauthsize = aead_setauthsize,
2207 			.encrypt = aead_encrypt,
2208 			.decrypt = aead_decrypt,
2209 			.ivsize = DES3_EDE_BLOCK_SIZE,
2210 			.maxauthsize = SHA384_DIGEST_SIZE,
2211 		},
2212 		.caam = {
2213 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2214 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2215 					   OP_ALG_AAI_HMAC_PRECOMP,
2216 			.geniv = true,
2217 		}
2218 	},
2219 	{
2220 		.aead = {
2221 			.base = {
2222 				.cra_name = "authenc(hmac(sha512),"
2223 					    "cbc(des3_ede))",
2224 				.cra_driver_name = "authenc-hmac-sha512-"
2225 						   "cbc-des3_ede-caam-qi2",
2226 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2227 			},
2228 			.setkey = des3_aead_setkey,
2229 			.setauthsize = aead_setauthsize,
2230 			.encrypt = aead_encrypt,
2231 			.decrypt = aead_decrypt,
2232 			.ivsize = DES3_EDE_BLOCK_SIZE,
2233 			.maxauthsize = SHA512_DIGEST_SIZE,
2234 		},
2235 		.caam = {
2236 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2237 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2238 					   OP_ALG_AAI_HMAC_PRECOMP,
2239 		},
2240 	},
2241 	{
2242 		.aead = {
2243 			.base = {
2244 				.cra_name = "echainiv(authenc(hmac(sha512),"
2245 					    "cbc(des3_ede)))",
2246 				.cra_driver_name = "echainiv-authenc-"
2247 						   "hmac-sha512-"
2248 						   "cbc-des3_ede-caam-qi2",
2249 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2250 			},
2251 			.setkey = des3_aead_setkey,
2252 			.setauthsize = aead_setauthsize,
2253 			.encrypt = aead_encrypt,
2254 			.decrypt = aead_decrypt,
2255 			.ivsize = DES3_EDE_BLOCK_SIZE,
2256 			.maxauthsize = SHA512_DIGEST_SIZE,
2257 		},
2258 		.caam = {
2259 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2260 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2261 					   OP_ALG_AAI_HMAC_PRECOMP,
2262 			.geniv = true,
2263 		}
2264 	},
2265 	{
2266 		.aead = {
2267 			.base = {
2268 				.cra_name = "authenc(hmac(md5),cbc(des))",
2269 				.cra_driver_name = "authenc-hmac-md5-"
2270 						   "cbc-des-caam-qi2",
2271 				.cra_blocksize = DES_BLOCK_SIZE,
2272 			},
2273 			.setkey = aead_setkey,
2274 			.setauthsize = aead_setauthsize,
2275 			.encrypt = aead_encrypt,
2276 			.decrypt = aead_decrypt,
2277 			.ivsize = DES_BLOCK_SIZE,
2278 			.maxauthsize = MD5_DIGEST_SIZE,
2279 		},
2280 		.caam = {
2281 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2282 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2283 					   OP_ALG_AAI_HMAC_PRECOMP,
2284 		},
2285 	},
2286 	{
2287 		.aead = {
2288 			.base = {
2289 				.cra_name = "echainiv(authenc(hmac(md5),"
2290 					    "cbc(des)))",
2291 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2292 						   "cbc-des-caam-qi2",
2293 				.cra_blocksize = DES_BLOCK_SIZE,
2294 			},
2295 			.setkey = aead_setkey,
2296 			.setauthsize = aead_setauthsize,
2297 			.encrypt = aead_encrypt,
2298 			.decrypt = aead_decrypt,
2299 			.ivsize = DES_BLOCK_SIZE,
2300 			.maxauthsize = MD5_DIGEST_SIZE,
2301 		},
2302 		.caam = {
2303 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2304 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2305 					   OP_ALG_AAI_HMAC_PRECOMP,
2306 			.geniv = true,
2307 		}
2308 	},
2309 	{
2310 		.aead = {
2311 			.base = {
2312 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2313 				.cra_driver_name = "authenc-hmac-sha1-"
2314 						   "cbc-des-caam-qi2",
2315 				.cra_blocksize = DES_BLOCK_SIZE,
2316 			},
2317 			.setkey = aead_setkey,
2318 			.setauthsize = aead_setauthsize,
2319 			.encrypt = aead_encrypt,
2320 			.decrypt = aead_decrypt,
2321 			.ivsize = DES_BLOCK_SIZE,
2322 			.maxauthsize = SHA1_DIGEST_SIZE,
2323 		},
2324 		.caam = {
2325 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2326 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2327 					   OP_ALG_AAI_HMAC_PRECOMP,
2328 		},
2329 	},
2330 	{
2331 		.aead = {
2332 			.base = {
2333 				.cra_name = "echainiv(authenc(hmac(sha1),"
2334 					    "cbc(des)))",
2335 				.cra_driver_name = "echainiv-authenc-"
2336 						   "hmac-sha1-cbc-des-caam-qi2",
2337 				.cra_blocksize = DES_BLOCK_SIZE,
2338 			},
2339 			.setkey = aead_setkey,
2340 			.setauthsize = aead_setauthsize,
2341 			.encrypt = aead_encrypt,
2342 			.decrypt = aead_decrypt,
2343 			.ivsize = DES_BLOCK_SIZE,
2344 			.maxauthsize = SHA1_DIGEST_SIZE,
2345 		},
2346 		.caam = {
2347 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2348 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2349 					   OP_ALG_AAI_HMAC_PRECOMP,
2350 			.geniv = true,
2351 		}
2352 	},
2353 	{
2354 		.aead = {
2355 			.base = {
2356 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2357 				.cra_driver_name = "authenc-hmac-sha224-"
2358 						   "cbc-des-caam-qi2",
2359 				.cra_blocksize = DES_BLOCK_SIZE,
2360 			},
2361 			.setkey = aead_setkey,
2362 			.setauthsize = aead_setauthsize,
2363 			.encrypt = aead_encrypt,
2364 			.decrypt = aead_decrypt,
2365 			.ivsize = DES_BLOCK_SIZE,
2366 			.maxauthsize = SHA224_DIGEST_SIZE,
2367 		},
2368 		.caam = {
2369 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2370 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2371 					   OP_ALG_AAI_HMAC_PRECOMP,
2372 		},
2373 	},
2374 	{
2375 		.aead = {
2376 			.base = {
2377 				.cra_name = "echainiv(authenc(hmac(sha224),"
2378 					    "cbc(des)))",
2379 				.cra_driver_name = "echainiv-authenc-"
2380 						   "hmac-sha224-cbc-des-"
2381 						   "caam-qi2",
2382 				.cra_blocksize = DES_BLOCK_SIZE,
2383 			},
2384 			.setkey = aead_setkey,
2385 			.setauthsize = aead_setauthsize,
2386 			.encrypt = aead_encrypt,
2387 			.decrypt = aead_decrypt,
2388 			.ivsize = DES_BLOCK_SIZE,
2389 			.maxauthsize = SHA224_DIGEST_SIZE,
2390 		},
2391 		.caam = {
2392 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2393 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2394 					   OP_ALG_AAI_HMAC_PRECOMP,
2395 			.geniv = true,
2396 		}
2397 	},
2398 	{
2399 		.aead = {
2400 			.base = {
2401 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2402 				.cra_driver_name = "authenc-hmac-sha256-"
2403 						   "cbc-des-caam-qi2",
2404 				.cra_blocksize = DES_BLOCK_SIZE,
2405 			},
2406 			.setkey = aead_setkey,
2407 			.setauthsize = aead_setauthsize,
2408 			.encrypt = aead_encrypt,
2409 			.decrypt = aead_decrypt,
2410 			.ivsize = DES_BLOCK_SIZE,
2411 			.maxauthsize = SHA256_DIGEST_SIZE,
2412 		},
2413 		.caam = {
2414 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2415 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2416 					   OP_ALG_AAI_HMAC_PRECOMP,
2417 		},
2418 	},
2419 	{
2420 		.aead = {
2421 			.base = {
2422 				.cra_name = "echainiv(authenc(hmac(sha256),"
2423 					    "cbc(des)))",
2424 				.cra_driver_name = "echainiv-authenc-"
2425 						   "hmac-sha256-cbc-des-"
2426 						   "caam-qi2",
2427 				.cra_blocksize = DES_BLOCK_SIZE,
2428 			},
2429 			.setkey = aead_setkey,
2430 			.setauthsize = aead_setauthsize,
2431 			.encrypt = aead_encrypt,
2432 			.decrypt = aead_decrypt,
2433 			.ivsize = DES_BLOCK_SIZE,
2434 			.maxauthsize = SHA256_DIGEST_SIZE,
2435 		},
2436 		.caam = {
2437 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2438 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2439 					   OP_ALG_AAI_HMAC_PRECOMP,
2440 			.geniv = true,
2441 		},
2442 	},
2443 	{
2444 		.aead = {
2445 			.base = {
2446 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2447 				.cra_driver_name = "authenc-hmac-sha384-"
2448 						   "cbc-des-caam-qi2",
2449 				.cra_blocksize = DES_BLOCK_SIZE,
2450 			},
2451 			.setkey = aead_setkey,
2452 			.setauthsize = aead_setauthsize,
2453 			.encrypt = aead_encrypt,
2454 			.decrypt = aead_decrypt,
2455 			.ivsize = DES_BLOCK_SIZE,
2456 			.maxauthsize = SHA384_DIGEST_SIZE,
2457 		},
2458 		.caam = {
2459 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2460 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2461 					   OP_ALG_AAI_HMAC_PRECOMP,
2462 		},
2463 	},
2464 	{
2465 		.aead = {
2466 			.base = {
2467 				.cra_name = "echainiv(authenc(hmac(sha384),"
2468 					    "cbc(des)))",
2469 				.cra_driver_name = "echainiv-authenc-"
2470 						   "hmac-sha384-cbc-des-"
2471 						   "caam-qi2",
2472 				.cra_blocksize = DES_BLOCK_SIZE,
2473 			},
2474 			.setkey = aead_setkey,
2475 			.setauthsize = aead_setauthsize,
2476 			.encrypt = aead_encrypt,
2477 			.decrypt = aead_decrypt,
2478 			.ivsize = DES_BLOCK_SIZE,
2479 			.maxauthsize = SHA384_DIGEST_SIZE,
2480 		},
2481 		.caam = {
2482 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2483 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2484 					   OP_ALG_AAI_HMAC_PRECOMP,
2485 			.geniv = true,
2486 		}
2487 	},
2488 	{
2489 		.aead = {
2490 			.base = {
2491 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2492 				.cra_driver_name = "authenc-hmac-sha512-"
2493 						   "cbc-des-caam-qi2",
2494 				.cra_blocksize = DES_BLOCK_SIZE,
2495 			},
2496 			.setkey = aead_setkey,
2497 			.setauthsize = aead_setauthsize,
2498 			.encrypt = aead_encrypt,
2499 			.decrypt = aead_decrypt,
2500 			.ivsize = DES_BLOCK_SIZE,
2501 			.maxauthsize = SHA512_DIGEST_SIZE,
2502 		},
2503 		.caam = {
2504 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2505 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2506 					   OP_ALG_AAI_HMAC_PRECOMP,
2507 		}
2508 	},
2509 	{
2510 		.aead = {
2511 			.base = {
2512 				.cra_name = "echainiv(authenc(hmac(sha512),"
2513 					    "cbc(des)))",
2514 				.cra_driver_name = "echainiv-authenc-"
2515 						   "hmac-sha512-cbc-des-"
2516 						   "caam-qi2",
2517 				.cra_blocksize = DES_BLOCK_SIZE,
2518 			},
2519 			.setkey = aead_setkey,
2520 			.setauthsize = aead_setauthsize,
2521 			.encrypt = aead_encrypt,
2522 			.decrypt = aead_decrypt,
2523 			.ivsize = DES_BLOCK_SIZE,
2524 			.maxauthsize = SHA512_DIGEST_SIZE,
2525 		},
2526 		.caam = {
2527 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2528 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2529 					   OP_ALG_AAI_HMAC_PRECOMP,
2530 			.geniv = true,
2531 		}
2532 	},
2533 	{
2534 		.aead = {
2535 			.base = {
2536 				.cra_name = "authenc(hmac(md5),"
2537 					    "rfc3686(ctr(aes)))",
2538 				.cra_driver_name = "authenc-hmac-md5-"
2539 						   "rfc3686-ctr-aes-caam-qi2",
2540 				.cra_blocksize = 1,
2541 			},
2542 			.setkey = aead_setkey,
2543 			.setauthsize = aead_setauthsize,
2544 			.encrypt = aead_encrypt,
2545 			.decrypt = aead_decrypt,
2546 			.ivsize = CTR_RFC3686_IV_SIZE,
2547 			.maxauthsize = MD5_DIGEST_SIZE,
2548 		},
2549 		.caam = {
2550 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2551 					   OP_ALG_AAI_CTR_MOD128,
2552 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2553 					   OP_ALG_AAI_HMAC_PRECOMP,
2554 			.rfc3686 = true,
2555 		},
2556 	},
2557 	{
2558 		.aead = {
2559 			.base = {
2560 				.cra_name = "seqiv(authenc("
2561 					    "hmac(md5),rfc3686(ctr(aes))))",
2562 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2563 						   "rfc3686-ctr-aes-caam-qi2",
2564 				.cra_blocksize = 1,
2565 			},
2566 			.setkey = aead_setkey,
2567 			.setauthsize = aead_setauthsize,
2568 			.encrypt = aead_encrypt,
2569 			.decrypt = aead_decrypt,
2570 			.ivsize = CTR_RFC3686_IV_SIZE,
2571 			.maxauthsize = MD5_DIGEST_SIZE,
2572 		},
2573 		.caam = {
2574 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2575 					   OP_ALG_AAI_CTR_MOD128,
2576 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2577 					   OP_ALG_AAI_HMAC_PRECOMP,
2578 			.rfc3686 = true,
2579 			.geniv = true,
2580 		},
2581 	},
2582 	{
2583 		.aead = {
2584 			.base = {
2585 				.cra_name = "authenc(hmac(sha1),"
2586 					    "rfc3686(ctr(aes)))",
2587 				.cra_driver_name = "authenc-hmac-sha1-"
2588 						   "rfc3686-ctr-aes-caam-qi2",
2589 				.cra_blocksize = 1,
2590 			},
2591 			.setkey = aead_setkey,
2592 			.setauthsize = aead_setauthsize,
2593 			.encrypt = aead_encrypt,
2594 			.decrypt = aead_decrypt,
2595 			.ivsize = CTR_RFC3686_IV_SIZE,
2596 			.maxauthsize = SHA1_DIGEST_SIZE,
2597 		},
2598 		.caam = {
2599 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2600 					   OP_ALG_AAI_CTR_MOD128,
2601 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2602 					   OP_ALG_AAI_HMAC_PRECOMP,
2603 			.rfc3686 = true,
2604 		},
2605 	},
2606 	{
2607 		.aead = {
2608 			.base = {
2609 				.cra_name = "seqiv(authenc("
2610 					    "hmac(sha1),rfc3686(ctr(aes))))",
2611 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2612 						   "rfc3686-ctr-aes-caam-qi2",
2613 				.cra_blocksize = 1,
2614 			},
2615 			.setkey = aead_setkey,
2616 			.setauthsize = aead_setauthsize,
2617 			.encrypt = aead_encrypt,
2618 			.decrypt = aead_decrypt,
2619 			.ivsize = CTR_RFC3686_IV_SIZE,
2620 			.maxauthsize = SHA1_DIGEST_SIZE,
2621 		},
2622 		.caam = {
2623 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2624 					   OP_ALG_AAI_CTR_MOD128,
2625 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2626 					   OP_ALG_AAI_HMAC_PRECOMP,
2627 			.rfc3686 = true,
2628 			.geniv = true,
2629 		},
2630 	},
2631 	{
2632 		.aead = {
2633 			.base = {
2634 				.cra_name = "authenc(hmac(sha224),"
2635 					    "rfc3686(ctr(aes)))",
2636 				.cra_driver_name = "authenc-hmac-sha224-"
2637 						   "rfc3686-ctr-aes-caam-qi2",
2638 				.cra_blocksize = 1,
2639 			},
2640 			.setkey = aead_setkey,
2641 			.setauthsize = aead_setauthsize,
2642 			.encrypt = aead_encrypt,
2643 			.decrypt = aead_decrypt,
2644 			.ivsize = CTR_RFC3686_IV_SIZE,
2645 			.maxauthsize = SHA224_DIGEST_SIZE,
2646 		},
2647 		.caam = {
2648 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2649 					   OP_ALG_AAI_CTR_MOD128,
2650 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2651 					   OP_ALG_AAI_HMAC_PRECOMP,
2652 			.rfc3686 = true,
2653 		},
2654 	},
2655 	{
2656 		.aead = {
2657 			.base = {
2658 				.cra_name = "seqiv(authenc("
2659 					    "hmac(sha224),rfc3686(ctr(aes))))",
2660 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2661 						   "rfc3686-ctr-aes-caam-qi2",
2662 				.cra_blocksize = 1,
2663 			},
2664 			.setkey = aead_setkey,
2665 			.setauthsize = aead_setauthsize,
2666 			.encrypt = aead_encrypt,
2667 			.decrypt = aead_decrypt,
2668 			.ivsize = CTR_RFC3686_IV_SIZE,
2669 			.maxauthsize = SHA224_DIGEST_SIZE,
2670 		},
2671 		.caam = {
2672 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2673 					   OP_ALG_AAI_CTR_MOD128,
2674 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2675 					   OP_ALG_AAI_HMAC_PRECOMP,
2676 			.rfc3686 = true,
2677 			.geniv = true,
2678 		},
2679 	},
2680 	{
2681 		.aead = {
2682 			.base = {
2683 				.cra_name = "authenc(hmac(sha256),"
2684 					    "rfc3686(ctr(aes)))",
2685 				.cra_driver_name = "authenc-hmac-sha256-"
2686 						   "rfc3686-ctr-aes-caam-qi2",
2687 				.cra_blocksize = 1,
2688 			},
2689 			.setkey = aead_setkey,
2690 			.setauthsize = aead_setauthsize,
2691 			.encrypt = aead_encrypt,
2692 			.decrypt = aead_decrypt,
2693 			.ivsize = CTR_RFC3686_IV_SIZE,
2694 			.maxauthsize = SHA256_DIGEST_SIZE,
2695 		},
2696 		.caam = {
2697 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2698 					   OP_ALG_AAI_CTR_MOD128,
2699 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2700 					   OP_ALG_AAI_HMAC_PRECOMP,
2701 			.rfc3686 = true,
2702 		},
2703 	},
2704 	{
2705 		.aead = {
2706 			.base = {
2707 				.cra_name = "seqiv(authenc(hmac(sha256),"
2708 					    "rfc3686(ctr(aes))))",
2709 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2710 						   "rfc3686-ctr-aes-caam-qi2",
2711 				.cra_blocksize = 1,
2712 			},
2713 			.setkey = aead_setkey,
2714 			.setauthsize = aead_setauthsize,
2715 			.encrypt = aead_encrypt,
2716 			.decrypt = aead_decrypt,
2717 			.ivsize = CTR_RFC3686_IV_SIZE,
2718 			.maxauthsize = SHA256_DIGEST_SIZE,
2719 		},
2720 		.caam = {
2721 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2722 					   OP_ALG_AAI_CTR_MOD128,
2723 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2724 					   OP_ALG_AAI_HMAC_PRECOMP,
2725 			.rfc3686 = true,
2726 			.geniv = true,
2727 		},
2728 	},
2729 	{
2730 		.aead = {
2731 			.base = {
2732 				.cra_name = "authenc(hmac(sha384),"
2733 					    "rfc3686(ctr(aes)))",
2734 				.cra_driver_name = "authenc-hmac-sha384-"
2735 						   "rfc3686-ctr-aes-caam-qi2",
2736 				.cra_blocksize = 1,
2737 			},
2738 			.setkey = aead_setkey,
2739 			.setauthsize = aead_setauthsize,
2740 			.encrypt = aead_encrypt,
2741 			.decrypt = aead_decrypt,
2742 			.ivsize = CTR_RFC3686_IV_SIZE,
2743 			.maxauthsize = SHA384_DIGEST_SIZE,
2744 		},
2745 		.caam = {
2746 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2747 					   OP_ALG_AAI_CTR_MOD128,
2748 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2749 					   OP_ALG_AAI_HMAC_PRECOMP,
2750 			.rfc3686 = true,
2751 		},
2752 	},
2753 	{
2754 		.aead = {
2755 			.base = {
2756 				.cra_name = "seqiv(authenc(hmac(sha384),"
2757 					    "rfc3686(ctr(aes))))",
2758 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2759 						   "rfc3686-ctr-aes-caam-qi2",
2760 				.cra_blocksize = 1,
2761 			},
2762 			.setkey = aead_setkey,
2763 			.setauthsize = aead_setauthsize,
2764 			.encrypt = aead_encrypt,
2765 			.decrypt = aead_decrypt,
2766 			.ivsize = CTR_RFC3686_IV_SIZE,
2767 			.maxauthsize = SHA384_DIGEST_SIZE,
2768 		},
2769 		.caam = {
2770 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2771 					   OP_ALG_AAI_CTR_MOD128,
2772 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2773 					   OP_ALG_AAI_HMAC_PRECOMP,
2774 			.rfc3686 = true,
2775 			.geniv = true,
2776 		},
2777 	},
2778 	{
2779 		.aead = {
2780 			.base = {
2781 				.cra_name = "rfc7539(chacha20,poly1305)",
2782 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2783 						   "caam-qi2",
2784 				.cra_blocksize = 1,
2785 			},
2786 			.setkey = chachapoly_setkey,
2787 			.setauthsize = chachapoly_setauthsize,
2788 			.encrypt = aead_encrypt,
2789 			.decrypt = aead_decrypt,
2790 			.ivsize = CHACHAPOLY_IV_SIZE,
2791 			.maxauthsize = POLY1305_DIGEST_SIZE,
2792 		},
2793 		.caam = {
2794 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2795 					   OP_ALG_AAI_AEAD,
2796 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2797 					   OP_ALG_AAI_AEAD,
2798 			.nodkp = true,
2799 		},
2800 	},
2801 	{
2802 		.aead = {
2803 			.base = {
2804 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2805 				.cra_driver_name = "rfc7539esp-chacha20-"
2806 						   "poly1305-caam-qi2",
2807 				.cra_blocksize = 1,
2808 			},
2809 			.setkey = chachapoly_setkey,
2810 			.setauthsize = chachapoly_setauthsize,
2811 			.encrypt = aead_encrypt,
2812 			.decrypt = aead_decrypt,
2813 			.ivsize = 8,
2814 			.maxauthsize = POLY1305_DIGEST_SIZE,
2815 		},
2816 		.caam = {
2817 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2818 					   OP_ALG_AAI_AEAD,
2819 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2820 					   OP_ALG_AAI_AEAD,
2821 			.nodkp = true,
2822 		},
2823 	},
2824 	{
2825 		.aead = {
2826 			.base = {
2827 				.cra_name = "authenc(hmac(sha512),"
2828 					    "rfc3686(ctr(aes)))",
2829 				.cra_driver_name = "authenc-hmac-sha512-"
2830 						   "rfc3686-ctr-aes-caam-qi2",
2831 				.cra_blocksize = 1,
2832 			},
2833 			.setkey = aead_setkey,
2834 			.setauthsize = aead_setauthsize,
2835 			.encrypt = aead_encrypt,
2836 			.decrypt = aead_decrypt,
2837 			.ivsize = CTR_RFC3686_IV_SIZE,
2838 			.maxauthsize = SHA512_DIGEST_SIZE,
2839 		},
2840 		.caam = {
2841 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2842 					   OP_ALG_AAI_CTR_MOD128,
2843 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2844 					   OP_ALG_AAI_HMAC_PRECOMP,
2845 			.rfc3686 = true,
2846 		},
2847 	},
2848 	{
2849 		.aead = {
2850 			.base = {
2851 				.cra_name = "seqiv(authenc(hmac(sha512),"
2852 					    "rfc3686(ctr(aes))))",
2853 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2854 						   "rfc3686-ctr-aes-caam-qi2",
2855 				.cra_blocksize = 1,
2856 			},
2857 			.setkey = aead_setkey,
2858 			.setauthsize = aead_setauthsize,
2859 			.encrypt = aead_encrypt,
2860 			.decrypt = aead_decrypt,
2861 			.ivsize = CTR_RFC3686_IV_SIZE,
2862 			.maxauthsize = SHA512_DIGEST_SIZE,
2863 		},
2864 		.caam = {
2865 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2866 					   OP_ALG_AAI_CTR_MOD128,
2867 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2868 					   OP_ALG_AAI_HMAC_PRECOMP,
2869 			.rfc3686 = true,
2870 			.geniv = true,
2871 		},
2872 	},
2873 };
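
/*
 * Illustrative only (not part of the driver): once these templates are
 * registered, users reach them through the generic crypto API by name.
 * The crypto core picks the implementation with the highest cra_priority,
 * so with this module loaded a request for "gcm(aes)" typically resolves
 * to "gcm-aes-caam-qi2":
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		...
 *		crypto_free_aead(tfm);
 *	}
 */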
2874 
2875 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2876 {
2877 	struct skcipher_alg *alg = &t_alg->skcipher;
2878 
2879 	alg->base.cra_module = THIS_MODULE;
2880 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2881 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2882 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2883 
2884 	alg->init = caam_cra_init_skcipher;
2885 	alg->exit = caam_cra_exit;
2886 }
2887 
2888 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2889 {
2890 	struct aead_alg *alg = &t_alg->aead;
2891 
2892 	alg->base.cra_module = THIS_MODULE;
2893 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2894 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2895 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2896 
2897 	alg->init = caam_cra_init_aead;
2898 	alg->exit = caam_cra_exit_aead;
2899 }
2900 
2901 /* max hash key is max split key size */
2902 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2903 
2904 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2905 
2906 /* caam context sizes for hashes: running digest + 8 */
2907 #define HASH_MSG_LEN			8
2908 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2909 
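/*
 * Each ahash tfm keeps one shared descriptor (and one Flow Context) per
 * operation type below; they are constructed in ahash_set_sh_desc().
 */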
2910 enum hash_optype {
2911 	UPDATE = 0,
2912 	UPDATE_FIRST,
2913 	FINALIZE,
2914 	DIGEST,
2915 	HASH_NUM_OP
2916 };
2917 
2918 /**
2919  * caam_hash_ctx - ahash per-session context
2920  * @flc: Flow Contexts array
2921  * @flc_dma: I/O virtual addresses of the Flow Contexts
2922  * @dev: dpseci device
2923  * @ctx_len: size of Context Register
2924  * @adata: hashing algorithm details
2925  */
2926 struct caam_hash_ctx {
2927 	struct caam_flc flc[HASH_NUM_OP];
2928 	dma_addr_t flc_dma[HASH_NUM_OP];
2929 	struct device *dev;
2930 	int ctx_len;
2931 	struct alginfo adata;
2932 };
2933 
2934 /* ahash state */
2935 struct caam_hash_state {
2936 	struct caam_request caam_req;
2937 	dma_addr_t buf_dma;
2938 	dma_addr_t ctx_dma;
2939 	int ctx_dma_len;
2940 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2941 	int buflen_0;
2942 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2943 	int buflen_1;
2944 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2945 	int (*update)(struct ahash_request *req);
2946 	int (*final)(struct ahash_request *req);
2947 	int (*finup)(struct ahash_request *req);
2948 	int current_buf;
2949 };
2950 
2951 struct caam_export_state {
2952 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2953 	u8 caam_ctx[MAX_CTX_LEN];
2954 	int buflen;
2955 	int (*update)(struct ahash_request *req);
2956 	int (*final)(struct ahash_request *req);
2957 	int (*finup)(struct ahash_request *req);
2958 };
2959 
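/*
 * The ahash state keeps two staging buffers (buf_0/buf_1) used in a
 * ping-pong fashion: the "current" buffer holds the sub-block remainder
 * carried over from the previous update, while the "alternate" buffer
 * stages the leftover bytes of the update in flight. switch_buf() flips
 * the roles once the in-flight job completes.
 */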
2960 static inline void switch_buf(struct caam_hash_state *state)
2961 {
2962 	state->current_buf ^= 1;
2963 }
2964 
2965 static inline u8 *current_buf(struct caam_hash_state *state)
2966 {
2967 	return state->current_buf ? state->buf_1 : state->buf_0;
2968 }
2969 
2970 static inline u8 *alt_buf(struct caam_hash_state *state)
2971 {
2972 	return state->current_buf ? state->buf_0 : state->buf_1;
2973 }
2974 
2975 static inline int *current_buflen(struct caam_hash_state *state)
2976 {
2977 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2978 }
2979 
2980 static inline int *alt_buflen(struct caam_hash_state *state)
2981 {
2982 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2983 }
2984 
2985 /* Map current buffer in state (if length > 0) and put it in link table */
2986 static inline int buf_map_to_qm_sg(struct device *dev,
2987 				   struct dpaa2_sg_entry *qm_sg,
2988 				   struct caam_hash_state *state)
2989 {
2990 	int buflen = *current_buflen(state);
2991 
2992 	if (!buflen)
2993 		return 0;
2994 
2995 	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2996 					DMA_TO_DEVICE);
2997 	if (dma_mapping_error(dev, state->buf_dma)) {
2998 		dev_err(dev, "unable to map buf\n");
2999 		state->buf_dma = 0;
3000 		return -ENOMEM;
3001 	}
3002 
3003 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3004 
3005 	return 0;
3006 }
3007 
3008 /* Map state->caam_ctx and add it to the link table */
3009 static inline int ctx_map_to_qm_sg(struct device *dev,
3010 				   struct caam_hash_state *state, int ctx_len,
3011 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3012 {
3013 	state->ctx_dma_len = ctx_len;
3014 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3015 	if (dma_mapping_error(dev, state->ctx_dma)) {
3016 		dev_err(dev, "unable to map ctx\n");
3017 		state->ctx_dma = 0;
3018 		return -ENOMEM;
3019 	}
3020 
3021 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3022 
3023 	return 0;
3024 }
3025 
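/*
 * Build the four ahash shared descriptors (update, update_first, finalize,
 * digest) for the current key / digest size and make them visible to the
 * accelerator via dma_sync_single_for_device().
 */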
3026 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3027 {
3028 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3029 	int digestsize = crypto_ahash_digestsize(ahash);
3030 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3031 	struct caam_flc *flc;
3032 	u32 *desc;
3033 
3034 	/* ahash_update shared descriptor */
3035 	flc = &ctx->flc[UPDATE];
3036 	desc = flc->sh_desc;
3037 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3038 			  ctx->ctx_len, true, priv->sec_attr.era);
3039 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3040 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3041 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3042 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3043 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3044 			     1);
3045 
3046 	/* ahash_update_first shared descriptor */
3047 	flc = &ctx->flc[UPDATE_FIRST];
3048 	desc = flc->sh_desc;
3049 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3050 			  ctx->ctx_len, false, priv->sec_attr.era);
3051 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3052 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3053 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3054 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3055 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3056 			     1);
3057 
3058 	/* ahash_final shared descriptor */
3059 	flc = &ctx->flc[FINALIZE];
3060 	desc = flc->sh_desc;
3061 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3062 			  ctx->ctx_len, true, priv->sec_attr.era);
3063 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3064 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3065 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3066 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3067 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3068 			     1);
3069 
3070 	/* ahash_digest shared descriptor */
3071 	flc = &ctx->flc[DIGEST];
3072 	desc = flc->sh_desc;
3073 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3074 			  ctx->ctx_len, false, priv->sec_attr.era);
3075 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3076 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3077 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3078 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3079 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3080 			     1);
3081 
3082 	return 0;
3083 }
3084 
3085 struct split_key_sh_result {
3086 	struct completion completion;
3087 	int err;
3088 	struct device *dev;
3089 };
3090 
3091 static void split_key_sh_done(void *cbk_ctx, u32 err)
3092 {
3093 	struct split_key_sh_result *res = cbk_ctx;
3094 
3095 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3096 
3097 	if (err)
3098 		caam_qi2_strstatus(res->dev, err);
3099 
3100 	res->err = err;
3101 	complete(&res->completion);
3102 }
3103 
3104 /* Hash the key down to digestsize when it is longer than the block size */
3105 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3106 			   u32 digestsize)
3107 {
3108 	struct caam_request *req_ctx;
3109 	u32 *desc;
3110 	struct split_key_sh_result result;
3111 	dma_addr_t key_dma;
3112 	struct caam_flc *flc;
3113 	dma_addr_t flc_dma;
3114 	int ret = -ENOMEM;
3115 	struct dpaa2_fl_entry *in_fle, *out_fle;
3116 
3117 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3118 	if (!req_ctx)
3119 		return -ENOMEM;
3120 
3121 	in_fle = &req_ctx->fd_flt[1];
3122 	out_fle = &req_ctx->fd_flt[0];
3123 
3124 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3125 	if (!flc)
3126 		goto err_flc;
3127 
3128 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3129 	if (dma_mapping_error(ctx->dev, key_dma)) {
3130 		dev_err(ctx->dev, "unable to map key memory\n");
3131 		goto err_key_dma;
3132 	}
3133 
3134 	desc = flc->sh_desc;
3135 
3136 	init_sh_desc(desc, 0);
3137 
3138 	/* descriptor to perform unkeyed hash on key_in */
3139 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3140 			 OP_ALG_AS_INITFINAL);
3141 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3142 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3143 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3144 			 LDST_SRCDST_BYTE_CONTEXT);
3145 
3146 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3147 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3148 				 desc_bytes(desc), DMA_TO_DEVICE);
3149 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3150 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3151 		goto err_flc_dma;
3152 	}
3153 
3154 	dpaa2_fl_set_final(in_fle, true);
3155 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3156 	dpaa2_fl_set_addr(in_fle, key_dma);
3157 	dpaa2_fl_set_len(in_fle, *keylen);
3158 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3159 	dpaa2_fl_set_addr(out_fle, key_dma);
3160 	dpaa2_fl_set_len(out_fle, digestsize);
3161 
3162 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3163 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3164 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3165 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3166 			     1);
3167 
3168 	result.err = 0;
3169 	init_completion(&result.completion);
3170 	result.dev = ctx->dev;
3171 
3172 	req_ctx->flc = flc;
3173 	req_ctx->flc_dma = flc_dma;
3174 	req_ctx->cbk = split_key_sh_done;
3175 	req_ctx->ctx = &result;
3176 
3177 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3178 	if (ret == -EINPROGRESS) {
3179 		/* in progress */
3180 		wait_for_completion(&result.completion);
3181 		ret = result.err;
3182 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3183 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3184 				     digestsize, 1);
3185 	}
3186 
3187 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3188 			 DMA_TO_DEVICE);
3189 err_flc_dma:
3190 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3191 err_key_dma:
3192 	kfree(flc);
3193 err_flc:
3194 	kfree(req_ctx);
3195 
3196 	*keylen = digestsize;
3197 
3198 	return ret;
3199 }
3200 
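/*
 * HMAC convention: a key longer than the block size is first hashed down
 * to digestsize (via hash_digest_key()) before being installed in the
 * shared descriptors.
 */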
3201 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3202 			unsigned int keylen)
3203 {
3204 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3205 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3206 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3207 	int ret;
3208 	u8 *hashed_key = NULL;
3209 
3210 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3211 
3212 	if (keylen > blocksize) {
3213 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3214 		if (!hashed_key)
3215 			return -ENOMEM;
3216 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3217 		if (ret)
3218 			goto bad_free_key;
3219 		key = hashed_key;
3220 	}
3221 
3222 	ctx->adata.keylen = keylen;
3223 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3224 					      OP_ALG_ALGSEL_MASK);
3225 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3226 		goto bad_free_key;
3227 
3228 	ctx->adata.key_virt = key;
3229 	ctx->adata.key_inline = true;
3230 
3231 	ret = ahash_set_sh_desc(ahash);
3232 	kfree(hashed_key);
3233 	return ret;
3234 bad_free_key:
3235 	kfree(hashed_key);
3236 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3237 	return -EINVAL;
3238 }
3239 
3240 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3241 			       struct ahash_request *req)
3242 {
3243 	struct caam_hash_state *state = ahash_request_ctx(req);
3244 
3245 	if (edesc->src_nents)
3246 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3247 
3248 	if (edesc->qm_sg_bytes)
3249 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3250 				 DMA_TO_DEVICE);
3251 
3252 	if (state->buf_dma) {
3253 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3254 				 DMA_TO_DEVICE);
3255 		state->buf_dma = 0;
3256 	}
3257 }
3258 
3259 static inline void ahash_unmap_ctx(struct device *dev,
3260 				   struct ahash_edesc *edesc,
3261 				   struct ahash_request *req, u32 flag)
3262 {
3263 	struct caam_hash_state *state = ahash_request_ctx(req);
3264 
3265 	if (state->ctx_dma) {
3266 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3267 		state->ctx_dma = 0;
3268 	}
3269 	ahash_unmap(dev, edesc, req);
3270 }
3271 
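/*
 * Completion callbacks: ahash_done() and ahash_done_ctx_src() copy the
 * final digest into req->result, while ahash_done_bi() and
 * ahash_done_ctx_dst() complete intermediate steps and flip the staging
 * buffers via switch_buf().
 */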
3272 static void ahash_done(void *cbk_ctx, u32 status)
3273 {
3274 	struct crypto_async_request *areq = cbk_ctx;
3275 	struct ahash_request *req = ahash_request_cast(areq);
3276 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3277 	struct caam_hash_state *state = ahash_request_ctx(req);
3278 	struct ahash_edesc *edesc = state->caam_req.edesc;
3279 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3280 	int digestsize = crypto_ahash_digestsize(ahash);
3281 	int ecode = 0;
3282 
3283 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3284 
3285 	if (unlikely(status)) {
3286 		caam_qi2_strstatus(ctx->dev, status);
3287 		ecode = -EIO;
3288 	}
3289 
3290 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3291 	memcpy(req->result, state->caam_ctx, digestsize);
3292 	qi_cache_free(edesc);
3293 
3294 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3295 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3296 			     ctx->ctx_len, 1);
3297 
3298 	req->base.complete(&req->base, ecode);
3299 }
3300 
3301 static void ahash_done_bi(void *cbk_ctx, u32 status)
3302 {
3303 	struct crypto_async_request *areq = cbk_ctx;
3304 	struct ahash_request *req = ahash_request_cast(areq);
3305 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3306 	struct caam_hash_state *state = ahash_request_ctx(req);
3307 	struct ahash_edesc *edesc = state->caam_req.edesc;
3308 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3309 	int ecode = 0;
3310 
3311 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3312 
3313 	if (unlikely(status)) {
3314 		caam_qi2_strstatus(ctx->dev, status);
3315 		ecode = -EIO;
3316 	}
3317 
3318 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3319 	switch_buf(state);
3320 	qi_cache_free(edesc);
3321 
3322 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3323 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3324 			     ctx->ctx_len, 1);
3325 	if (req->result)
3326 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3327 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3328 				     crypto_ahash_digestsize(ahash), 1);
3329 
3330 	req->base.complete(&req->base, ecode);
3331 }
3332 
3333 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3334 {
3335 	struct crypto_async_request *areq = cbk_ctx;
3336 	struct ahash_request *req = ahash_request_cast(areq);
3337 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3338 	struct caam_hash_state *state = ahash_request_ctx(req);
3339 	struct ahash_edesc *edesc = state->caam_req.edesc;
3340 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3341 	int digestsize = crypto_ahash_digestsize(ahash);
3342 	int ecode = 0;
3343 
3344 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3345 
3346 	if (unlikely(status)) {
3347 		caam_qi2_strstatus(ctx->dev, status);
3348 		ecode = -EIO;
3349 	}
3350 
3351 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3352 	memcpy(req->result, state->caam_ctx, digestsize);
3353 	qi_cache_free(edesc);
3354 
3355 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3356 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3357 			     ctx->ctx_len, 1);
3358 
3359 	req->base.complete(&req->base, ecode);
3360 }
3361 
3362 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3363 {
3364 	struct crypto_async_request *areq = cbk_ctx;
3365 	struct ahash_request *req = ahash_request_cast(areq);
3366 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3367 	struct caam_hash_state *state = ahash_request_ctx(req);
3368 	struct ahash_edesc *edesc = state->caam_req.edesc;
3369 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3370 	int ecode = 0;
3371 
3372 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3373 
3374 	if (unlikely(status)) {
3375 		caam_qi2_strstatus(ctx->dev, status);
3376 		ecode = -EIO;
3377 	}
3378 
3379 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3380 	switch_buf(state);
3381 	qi_cache_free(edesc);
3382 
3383 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3384 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3385 			     ctx->ctx_len, 1);
3386 	if (req->result)
3387 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3388 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3389 				     crypto_ahash_digestsize(ahash), 1);
3390 
3391 	req->base.complete(&req->base, ecode);
3392 }
3393 
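/*
 * .update hook with the running digest kept in the CAAM Context Register:
 * hash as many full blocks as possible (previous leftover + new request
 * data) and stash the remainder in the alternate buffer for the next call.
 * If everything still fits below one block, just accumulate.
 */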
3394 static int ahash_update_ctx(struct ahash_request *req)
3395 {
3396 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3397 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3398 	struct caam_hash_state *state = ahash_request_ctx(req);
3399 	struct caam_request *req_ctx = &state->caam_req;
3400 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3401 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3402 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3403 		      GFP_KERNEL : GFP_ATOMIC;
3404 	u8 *buf = current_buf(state);
3405 	int *buflen = current_buflen(state);
3406 	u8 *next_buf = alt_buf(state);
3407 	int *next_buflen = alt_buflen(state), last_buflen;
3408 	int in_len = *buflen + req->nbytes, to_hash;
3409 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3410 	struct ahash_edesc *edesc;
3411 	int ret = 0;
3412 
3413 	last_buflen = *next_buflen;
3414 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3415 	to_hash = in_len - *next_buflen;
3416 
3417 	if (to_hash) {
3418 		struct dpaa2_sg_entry *sg_table;
3419 		int src_len = req->nbytes - *next_buflen;
3420 
3421 		src_nents = sg_nents_for_len(req->src, src_len);
3422 		if (src_nents < 0) {
3423 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3424 			return src_nents;
3425 		}
3426 
3427 		if (src_nents) {
3428 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3429 						  DMA_TO_DEVICE);
3430 			if (!mapped_nents) {
3431 				dev_err(ctx->dev, "unable to DMA map source\n");
3432 				return -ENOMEM;
3433 			}
3434 		} else {
3435 			mapped_nents = 0;
3436 		}
3437 
3438 		/* allocate space for base edesc and link tables */
3439 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3440 		if (!edesc) {
3441 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3442 				     DMA_TO_DEVICE);
3443 			return -ENOMEM;
3444 		}
3445 
3446 		edesc->src_nents = src_nents;
3447 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3448 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3449 			      sizeof(*sg_table);
3450 		sg_table = &edesc->sgt[0];
3451 
3452 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3453 				       DMA_BIDIRECTIONAL);
3454 		if (ret)
3455 			goto unmap_ctx;
3456 
3457 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3458 		if (ret)
3459 			goto unmap_ctx;
3460 
3461 		if (mapped_nents) {
3462 			sg_to_qm_sg_last(req->src, src_len,
3463 					 sg_table + qm_sg_src_index, 0);
3464 			if (*next_buflen)
3465 				scatterwalk_map_and_copy(next_buf, req->src,
3466 							 to_hash - *buflen,
3467 							 *next_buflen, 0);
3468 		} else {
3469 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3470 					   true);
3471 		}
3472 
3473 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3474 						  qm_sg_bytes, DMA_TO_DEVICE);
3475 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3476 			dev_err(ctx->dev, "unable to map S/G table\n");
3477 			ret = -ENOMEM;
3478 			goto unmap_ctx;
3479 		}
3480 		edesc->qm_sg_bytes = qm_sg_bytes;
3481 
3482 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3483 		dpaa2_fl_set_final(in_fle, true);
3484 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3485 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3486 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3487 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3488 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3489 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3490 
3491 		req_ctx->flc = &ctx->flc[UPDATE];
3492 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3493 		req_ctx->cbk = ahash_done_bi;
3494 		req_ctx->ctx = &req->base;
3495 		req_ctx->edesc = edesc;
3496 
3497 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3498 		if (ret != -EINPROGRESS &&
3499 		    !(ret == -EBUSY &&
3500 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3501 			goto unmap_ctx;
3502 	} else if (*next_buflen) {
3503 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3504 					 req->nbytes, 0);
3505 		*buflen = *next_buflen;
3506 		*next_buflen = last_buflen;
3507 	}
3508 
3509 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3510 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3511 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3512 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3513 			     1);
3514 
3515 	return ret;
3516 unmap_ctx:
3517 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3518 	qi_cache_free(edesc);
3519 	return ret;
3520 }
3521 
3522 static int ahash_final_ctx(struct ahash_request *req)
3523 {
3524 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3525 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3526 	struct caam_hash_state *state = ahash_request_ctx(req);
3527 	struct caam_request *req_ctx = &state->caam_req;
3528 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3529 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3530 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3531 		      GFP_KERNEL : GFP_ATOMIC;
3532 	int buflen = *current_buflen(state);
3533 	int qm_sg_bytes;
3534 	int digestsize = crypto_ahash_digestsize(ahash);
3535 	struct ahash_edesc *edesc;
3536 	struct dpaa2_sg_entry *sg_table;
3537 	int ret;
3538 
3539 	/* allocate space for base edesc and link tables */
3540 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3541 	if (!edesc)
3542 		return -ENOMEM;
3543 
3544 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3545 	sg_table = &edesc->sgt[0];
3546 
3547 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3548 			       DMA_BIDIRECTIONAL);
3549 	if (ret)
3550 		goto unmap_ctx;
3551 
3552 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3553 	if (ret)
3554 		goto unmap_ctx;
3555 
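	/*
	 * Mark the last input entry - the buffered data if present, otherwise
	 * the saved context - as the final one in the S/G table.
	 */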
3556 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3557 
3558 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3559 					  DMA_TO_DEVICE);
3560 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3561 		dev_err(ctx->dev, "unable to map S/G table\n");
3562 		ret = -ENOMEM;
3563 		goto unmap_ctx;
3564 	}
3565 	edesc->qm_sg_bytes = qm_sg_bytes;
3566 
3567 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3568 	dpaa2_fl_set_final(in_fle, true);
3569 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3570 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3571 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3572 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3573 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3574 	dpaa2_fl_set_len(out_fle, digestsize);
3575 
3576 	req_ctx->flc = &ctx->flc[FINALIZE];
3577 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3578 	req_ctx->cbk = ahash_done_ctx_src;
3579 	req_ctx->ctx = &req->base;
3580 	req_ctx->edesc = edesc;
3581 
3582 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3583 	if (ret == -EINPROGRESS ||
3584 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3585 		return ret;
3586 
3587 unmap_ctx:
3588 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3589 	qi_cache_free(edesc);
3590 	return ret;
3591 }
3592 
3593 static int ahash_finup_ctx(struct ahash_request *req)
3594 {
3595 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3596 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3597 	struct caam_hash_state *state = ahash_request_ctx(req);
3598 	struct caam_request *req_ctx = &state->caam_req;
3599 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3600 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3601 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3602 		      GFP_KERNEL : GFP_ATOMIC;
3603 	int buflen = *current_buflen(state);
3604 	int qm_sg_bytes, qm_sg_src_index;
3605 	int src_nents, mapped_nents;
3606 	int digestsize = crypto_ahash_digestsize(ahash);
3607 	struct ahash_edesc *edesc;
3608 	struct dpaa2_sg_entry *sg_table;
3609 	int ret;
3610 
3611 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3612 	if (src_nents < 0) {
3613 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3614 		return src_nents;
3615 	}
3616 
3617 	if (src_nents) {
3618 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3619 					  DMA_TO_DEVICE);
3620 		if (!mapped_nents) {
3621 			dev_err(ctx->dev, "unable to DMA map source\n");
3622 			return -ENOMEM;
3623 		}
3624 	} else {
3625 		mapped_nents = 0;
3626 	}
3627 
3628 	/* allocate space for base edesc and link tables */
3629 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3630 	if (!edesc) {
3631 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3632 		return -ENOMEM;
3633 	}
3634 
3635 	edesc->src_nents = src_nents;
3636 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3637 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3638 		      sizeof(*sg_table);
3639 	sg_table = &edesc->sgt[0];
3640 
3641 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3642 			       DMA_BIDIRECTIONAL);
3643 	if (ret)
3644 		goto unmap_ctx;
3645 
3646 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3647 	if (ret)
3648 		goto unmap_ctx;
3649 
3650 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3651 
3652 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3653 					  DMA_TO_DEVICE);
3654 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3655 		dev_err(ctx->dev, "unable to map S/G table\n");
3656 		ret = -ENOMEM;
3657 		goto unmap_ctx;
3658 	}
3659 	edesc->qm_sg_bytes = qm_sg_bytes;
3660 
3661 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3662 	dpaa2_fl_set_final(in_fle, true);
3663 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3664 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3665 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3666 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3667 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3668 	dpaa2_fl_set_len(out_fle, digestsize);
3669 
3670 	req_ctx->flc = &ctx->flc[FINALIZE];
3671 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3672 	req_ctx->cbk = ahash_done_ctx_src;
3673 	req_ctx->ctx = &req->base;
3674 	req_ctx->edesc = edesc;
3675 
3676 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3677 	if (ret == -EINPROGRESS ||
3678 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3679 		return ret;
3680 
3681 unmap_ctx:
3682 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3683 	qi_cache_free(edesc);
3684 	return ret;
3685 }
3686 
3687 static int ahash_digest(struct ahash_request *req)
3688 {
3689 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3690 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3691 	struct caam_hash_state *state = ahash_request_ctx(req);
3692 	struct caam_request *req_ctx = &state->caam_req;
3693 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3694 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3695 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3696 		      GFP_KERNEL : GFP_ATOMIC;
3697 	int digestsize = crypto_ahash_digestsize(ahash);
3698 	int src_nents, mapped_nents;
3699 	struct ahash_edesc *edesc;
3700 	int ret = -ENOMEM;
3701 
3702 	state->buf_dma = 0;
3703 
3704 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3705 	if (src_nents < 0) {
3706 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3707 		return src_nents;
3708 	}
3709 
3710 	if (src_nents) {
3711 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3712 					  DMA_TO_DEVICE);
3713 		if (!mapped_nents) {
3714 			dev_err(ctx->dev, "unable to map source for DMA\n");
3715 			return ret;
3716 		}
3717 	} else {
3718 		mapped_nents = 0;
3719 	}
3720 
3721 	/* allocate space for base edesc and link tables */
3722 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3723 	if (!edesc) {
3724 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3725 		return ret;
3726 	}
3727 
3728 	edesc->src_nents = src_nents;
3729 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3730 
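	/*
	 * A single mapped segment can be passed directly as a "single" frame
	 * list entry; multiple segments need an intermediate QI S/G table.
	 */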
3731 	if (mapped_nents > 1) {
3732 		int qm_sg_bytes;
3733 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3734 
3735 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3736 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3737 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3738 						  qm_sg_bytes, DMA_TO_DEVICE);
3739 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3740 			dev_err(ctx->dev, "unable to map S/G table\n");
3741 			goto unmap;
3742 		}
3743 		edesc->qm_sg_bytes = qm_sg_bytes;
3744 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3745 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3746 	} else {
3747 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3748 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3749 	}
3750 
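	/* Map the hash state buffer to receive the final digest */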
3751 	state->ctx_dma_len = digestsize;
3752 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3753 					DMA_FROM_DEVICE);
3754 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3755 		dev_err(ctx->dev, "unable to map ctx\n");
3756 		state->ctx_dma = 0;
3757 		goto unmap;
3758 	}
3759 
3760 	dpaa2_fl_set_final(in_fle, true);
3761 	dpaa2_fl_set_len(in_fle, req->nbytes);
3762 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3763 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3764 	dpaa2_fl_set_len(out_fle, digestsize);
3765 
3766 	req_ctx->flc = &ctx->flc[DIGEST];
3767 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3768 	req_ctx->cbk = ahash_done;
3769 	req_ctx->ctx = &req->base;
3770 	req_ctx->edesc = edesc;
3771 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3772 	if (ret == -EINPROGRESS ||
3773 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3774 		return ret;
3775 
3776 unmap:
3777 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3778 	qi_cache_free(edesc);
3779 	return ret;
3780 }
3781 
3782 static int ahash_final_no_ctx(struct ahash_request *req)
3783 {
3784 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3785 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3786 	struct caam_hash_state *state = ahash_request_ctx(req);
3787 	struct caam_request *req_ctx = &state->caam_req;
3788 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3789 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3790 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3791 		      GFP_KERNEL : GFP_ATOMIC;
3792 	u8 *buf = current_buf(state);
3793 	int buflen = *current_buflen(state);
3794 	int digestsize = crypto_ahash_digestsize(ahash);
3795 	struct ahash_edesc *edesc;
3796 	int ret = -ENOMEM;
3797 
3798 	/* allocate space for base edesc and link tables */
3799 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3800 	if (!edesc)
3801 		return ret;
3802 
3803 	if (buflen) {
3804 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3805 						DMA_TO_DEVICE);
3806 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3807 			dev_err(ctx->dev, "unable to map src\n");
3808 			goto unmap;
3809 		}
3810 	}
3811 
3812 	state->ctx_dma_len = digestsize;
3813 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3814 					DMA_FROM_DEVICE);
3815 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3816 		dev_err(ctx->dev, "unable to map ctx\n");
3817 		state->ctx_dma = 0;
3818 		goto unmap;
3819 	}
3820 
3821 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3822 	dpaa2_fl_set_final(in_fle, true);
	/*
	 * The crypto engine requires the input entry to be present when the
	 * "frame list" FD format is used.
	 * Since the engine does not support FMT=2'b11 (unused entry type),
	 * leaving in_fle zeroized (except for the "Final" flag) is the best
	 * option.
	 */
3829 	if (buflen) {
3830 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3831 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3832 		dpaa2_fl_set_len(in_fle, buflen);
3833 	}
3834 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3835 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3836 	dpaa2_fl_set_len(out_fle, digestsize);
3837 
3838 	req_ctx->flc = &ctx->flc[DIGEST];
3839 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3840 	req_ctx->cbk = ahash_done;
3841 	req_ctx->ctx = &req->base;
3842 	req_ctx->edesc = edesc;
3843 
3844 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3845 	if (ret == -EINPROGRESS ||
3846 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3847 		return ret;
3848 
3849 unmap:
3850 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3851 	qi_cache_free(edesc);
3852 	return ret;
3853 }
3854 
3855 static int ahash_update_no_ctx(struct ahash_request *req)
3856 {
3857 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3858 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3859 	struct caam_hash_state *state = ahash_request_ctx(req);
3860 	struct caam_request *req_ctx = &state->caam_req;
3861 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3862 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3863 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3864 		      GFP_KERNEL : GFP_ATOMIC;
3865 	u8 *buf = current_buf(state);
3866 	int *buflen = current_buflen(state);
3867 	u8 *next_buf = alt_buf(state);
3868 	int *next_buflen = alt_buflen(state);
3869 	int in_len = *buflen + req->nbytes, to_hash;
3870 	int qm_sg_bytes, src_nents, mapped_nents;
3871 	struct ahash_edesc *edesc;
3872 	int ret = 0;
3873 
3874 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3875 	to_hash = in_len - *next_buflen;
3876 
3877 	if (to_hash) {
3878 		struct dpaa2_sg_entry *sg_table;
3879 		int src_len = req->nbytes - *next_buflen;
3880 
3881 		src_nents = sg_nents_for_len(req->src, src_len);
3882 		if (src_nents < 0) {
3883 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3884 			return src_nents;
3885 		}
3886 
3887 		if (src_nents) {
3888 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3889 						  DMA_TO_DEVICE);
3890 			if (!mapped_nents) {
3891 				dev_err(ctx->dev, "unable to DMA map source\n");
3892 				return -ENOMEM;
3893 			}
3894 		} else {
3895 			mapped_nents = 0;
3896 		}
3897 
3898 		/* allocate space for base edesc and link tables */
3899 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3900 		if (!edesc) {
3901 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3902 				     DMA_TO_DEVICE);
3903 			return -ENOMEM;
3904 		}
3905 
3906 		edesc->src_nents = src_nents;
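		/*
		 * No running context yet: the S/G table holds only the
		 * buffered partial block followed by the new source data.
		 */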
3907 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3908 			      sizeof(*sg_table);
3909 		sg_table = &edesc->sgt[0];
3910 
3911 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3912 		if (ret)
3913 			goto unmap_ctx;
3914 
3915 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3916 
3917 		if (*next_buflen)
3918 			scatterwalk_map_and_copy(next_buf, req->src,
3919 						 to_hash - *buflen,
3920 						 *next_buflen, 0);
3921 
3922 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3923 						  qm_sg_bytes, DMA_TO_DEVICE);
3924 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3925 			dev_err(ctx->dev, "unable to map S/G table\n");
3926 			ret = -ENOMEM;
3927 			goto unmap_ctx;
3928 		}
3929 		edesc->qm_sg_bytes = qm_sg_bytes;
3930 
3931 		state->ctx_dma_len = ctx->ctx_len;
3932 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3933 						ctx->ctx_len, DMA_FROM_DEVICE);
3934 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3935 			dev_err(ctx->dev, "unable to map ctx\n");
3936 			state->ctx_dma = 0;
3937 			ret = -ENOMEM;
3938 			goto unmap_ctx;
3939 		}
3940 
3941 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3942 		dpaa2_fl_set_final(in_fle, true);
3943 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3944 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3945 		dpaa2_fl_set_len(in_fle, to_hash);
3946 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3947 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3948 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3949 
3950 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3951 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3952 		req_ctx->cbk = ahash_done_ctx_dst;
3953 		req_ctx->ctx = &req->base;
3954 		req_ctx->edesc = edesc;
3955 
3956 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3957 		if (ret != -EINPROGRESS &&
3958 		    !(ret == -EBUSY &&
3959 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3960 			goto unmap_ctx;
3961 
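		/*
		 * A running context is now being produced; use the
		 * context-based handlers from here on.
		 */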
3962 		state->update = ahash_update_ctx;
3963 		state->finup = ahash_finup_ctx;
3964 		state->final = ahash_final_ctx;
3965 	} else if (*next_buflen) {
3966 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3967 					 req->nbytes, 0);
3968 		*buflen = *next_buflen;
3969 		*next_buflen = 0;
3970 	}
3971 
3972 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3973 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3974 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3975 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3976 			     1);
3977 
3978 	return ret;
3979 unmap_ctx:
3980 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3981 	qi_cache_free(edesc);
3982 	return ret;
3983 }
3984 
3985 static int ahash_finup_no_ctx(struct ahash_request *req)
3986 {
3987 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3988 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3989 	struct caam_hash_state *state = ahash_request_ctx(req);
3990 	struct caam_request *req_ctx = &state->caam_req;
3991 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3992 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3993 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3994 		      GFP_KERNEL : GFP_ATOMIC;
3995 	int buflen = *current_buflen(state);
3996 	int qm_sg_bytes, src_nents, mapped_nents;
3997 	int digestsize = crypto_ahash_digestsize(ahash);
3998 	struct ahash_edesc *edesc;
3999 	struct dpaa2_sg_entry *sg_table;
4000 	int ret;
4001 
4002 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4003 	if (src_nents < 0) {
4004 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4005 		return src_nents;
4006 	}
4007 
4008 	if (src_nents) {
4009 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4010 					  DMA_TO_DEVICE);
4011 		if (!mapped_nents) {
4012 			dev_err(ctx->dev, "unable to DMA map source\n");
4013 			return -ENOMEM;
4014 		}
4015 	} else {
4016 		mapped_nents = 0;
4017 	}
4018 
4019 	/* allocate space for base edesc and link tables */
4020 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4021 	if (!edesc) {
4022 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4023 		return -ENOMEM;
4024 	}
4025 
4026 	edesc->src_nents = src_nents;
4027 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4028 	sg_table = &edesc->sgt[0];
4029 
4030 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4031 	if (ret)
4032 		goto unmap;
4033 
4034 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4035 
4036 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4037 					  DMA_TO_DEVICE);
4038 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4039 		dev_err(ctx->dev, "unable to map S/G table\n");
4040 		ret = -ENOMEM;
4041 		goto unmap;
4042 	}
4043 	edesc->qm_sg_bytes = qm_sg_bytes;
4044 
4045 	state->ctx_dma_len = digestsize;
4046 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4047 					DMA_FROM_DEVICE);
4048 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4049 		dev_err(ctx->dev, "unable to map ctx\n");
4050 		state->ctx_dma = 0;
4051 		ret = -ENOMEM;
4052 		goto unmap;
4053 	}
4054 
4055 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4056 	dpaa2_fl_set_final(in_fle, true);
4057 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4058 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4059 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4060 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4061 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4062 	dpaa2_fl_set_len(out_fle, digestsize);
4063 
4064 	req_ctx->flc = &ctx->flc[DIGEST];
4065 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4066 	req_ctx->cbk = ahash_done;
4067 	req_ctx->ctx = &req->base;
4068 	req_ctx->edesc = edesc;
4069 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4070 	if (ret != -EINPROGRESS &&
4071 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4072 		goto unmap;
4073 
4074 	return ret;
4075 unmap:
4076 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4077 	qi_cache_free(edesc);
	return ret;
4079 }
4080 
4081 static int ahash_update_first(struct ahash_request *req)
4082 {
4083 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4084 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4085 	struct caam_hash_state *state = ahash_request_ctx(req);
4086 	struct caam_request *req_ctx = &state->caam_req;
4087 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4088 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4089 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4090 		      GFP_KERNEL : GFP_ATOMIC;
4091 	u8 *next_buf = alt_buf(state);
4092 	int *next_buflen = alt_buflen(state);
4093 	int to_hash;
4094 	int src_nents, mapped_nents;
4095 	struct ahash_edesc *edesc;
4096 	int ret = 0;
4097 
4098 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4099 				      1);
4100 	to_hash = req->nbytes - *next_buflen;
4101 
4102 	if (to_hash) {
4103 		struct dpaa2_sg_entry *sg_table;
4104 		int src_len = req->nbytes - *next_buflen;
4105 
4106 		src_nents = sg_nents_for_len(req->src, src_len);
4107 		if (src_nents < 0) {
4108 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4109 			return src_nents;
4110 		}
4111 
4112 		if (src_nents) {
4113 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4114 						  DMA_TO_DEVICE);
4115 			if (!mapped_nents) {
4116 				dev_err(ctx->dev, "unable to map source for DMA\n");
4117 				return -ENOMEM;
4118 			}
4119 		} else {
4120 			mapped_nents = 0;
4121 		}
4122 
4123 		/* allocate space for base edesc and link tables */
4124 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4125 		if (!edesc) {
4126 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4127 				     DMA_TO_DEVICE);
4128 			return -ENOMEM;
4129 		}
4130 
4131 		edesc->src_nents = src_nents;
4132 		sg_table = &edesc->sgt[0];
4133 
4134 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4135 		dpaa2_fl_set_final(in_fle, true);
4136 		dpaa2_fl_set_len(in_fle, to_hash);
4137 
4138 		if (mapped_nents > 1) {
4139 			int qm_sg_bytes;
4140 
4141 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4142 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4143 				      sizeof(*sg_table);
4144 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4145 							  qm_sg_bytes,
4146 							  DMA_TO_DEVICE);
4147 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4148 				dev_err(ctx->dev, "unable to map S/G table\n");
4149 				ret = -ENOMEM;
4150 				goto unmap_ctx;
4151 			}
4152 			edesc->qm_sg_bytes = qm_sg_bytes;
4153 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4154 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4155 		} else {
4156 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4157 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4158 		}
4159 
4160 		if (*next_buflen)
4161 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4162 						 *next_buflen, 0);
4163 
4164 		state->ctx_dma_len = ctx->ctx_len;
4165 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4166 						ctx->ctx_len, DMA_FROM_DEVICE);
4167 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4168 			dev_err(ctx->dev, "unable to map ctx\n");
4169 			state->ctx_dma = 0;
4170 			ret = -ENOMEM;
4171 			goto unmap_ctx;
4172 		}
4173 
4174 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4175 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4176 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4177 
4178 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4179 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4180 		req_ctx->cbk = ahash_done_ctx_dst;
4181 		req_ctx->ctx = &req->base;
4182 		req_ctx->edesc = edesc;
4183 
4184 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4185 		if (ret != -EINPROGRESS &&
4186 		    !(ret == -EBUSY && req->base.flags &
4187 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4188 			goto unmap_ctx;
4189 
4190 		state->update = ahash_update_ctx;
4191 		state->finup = ahash_finup_ctx;
4192 		state->final = ahash_final_ctx;
4193 	} else if (*next_buflen) {
4194 		state->update = ahash_update_no_ctx;
4195 		state->finup = ahash_finup_no_ctx;
4196 		state->final = ahash_final_no_ctx;
4197 		scatterwalk_map_and_copy(next_buf, req->src, 0,
4198 					 req->nbytes, 0);
4199 		switch_buf(state);
4200 	}
4201 
4202 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4203 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4204 			     1);
4205 
4206 	return ret;
4207 unmap_ctx:
4208 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4209 	qi_cache_free(edesc);
4210 	return ret;
4211 }
4212 
4213 static int ahash_finup_first(struct ahash_request *req)
4214 {
4215 	return ahash_digest(req);
4216 }
4217 
4218 static int ahash_init(struct ahash_request *req)
4219 {
4220 	struct caam_hash_state *state = ahash_request_ctx(req);
4221 
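	/*
	 * The first update decides which handler set is used afterwards:
	 * context-based once data has actually been hashed, buffer-only
	 * otherwise.
	 */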
4222 	state->update = ahash_update_first;
4223 	state->finup = ahash_finup_first;
4224 	state->final = ahash_final_no_ctx;
4225 
4226 	state->ctx_dma = 0;
4227 	state->ctx_dma_len = 0;
4228 	state->current_buf = 0;
4229 	state->buf_dma = 0;
4230 	state->buflen_0 = 0;
4231 	state->buflen_1 = 0;
4232 
4233 	return 0;
4234 }
4235 
4236 static int ahash_update(struct ahash_request *req)
4237 {
4238 	struct caam_hash_state *state = ahash_request_ctx(req);
4239 
4240 	return state->update(req);
4241 }
4242 
4243 static int ahash_finup(struct ahash_request *req)
4244 {
4245 	struct caam_hash_state *state = ahash_request_ctx(req);
4246 
4247 	return state->finup(req);
4248 }
4249 
4250 static int ahash_final(struct ahash_request *req)
4251 {
4252 	struct caam_hash_state *state = ahash_request_ctx(req);
4253 
4254 	return state->final(req);
4255 }
4256 
4257 static int ahash_export(struct ahash_request *req, void *out)
4258 {
4259 	struct caam_hash_state *state = ahash_request_ctx(req);
4260 	struct caam_export_state *export = out;
4261 	int len;
4262 	u8 *buf;
4263 
4264 	if (state->current_buf) {
4265 		buf = state->buf_1;
4266 		len = state->buflen_1;
4267 	} else {
4268 		buf = state->buf_0;
4269 		len = state->buflen_0;
4270 	}
4271 
4272 	memcpy(export->buf, buf, len);
4273 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4274 	export->buflen = len;
4275 	export->update = state->update;
4276 	export->final = state->final;
4277 	export->finup = state->finup;
4278 
4279 	return 0;
4280 }
4281 
4282 static int ahash_import(struct ahash_request *req, const void *in)
4283 {
4284 	struct caam_hash_state *state = ahash_request_ctx(req);
4285 	const struct caam_export_state *export = in;
4286 
4287 	memset(state, 0, sizeof(*state));
4288 	memcpy(state->buf_0, export->buf, export->buflen);
4289 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4290 	state->buflen_0 = export->buflen;
4291 	state->update = export->update;
4292 	state->final = export->final;
4293 	state->finup = export->finup;
4294 
4295 	return 0;
4296 }
4297 
4298 struct caam_hash_template {
4299 	char name[CRYPTO_MAX_ALG_NAME];
4300 	char driver_name[CRYPTO_MAX_ALG_NAME];
4301 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4302 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4303 	unsigned int blocksize;
4304 	struct ahash_alg template_ahash;
4305 	u32 alg_type;
4306 };
4307 
4308 /* ahash descriptors */
4309 static struct caam_hash_template driver_hash[] = {
4310 	{
4311 		.name = "sha1",
4312 		.driver_name = "sha1-caam-qi2",
4313 		.hmac_name = "hmac(sha1)",
4314 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4315 		.blocksize = SHA1_BLOCK_SIZE,
4316 		.template_ahash = {
4317 			.init = ahash_init,
4318 			.update = ahash_update,
4319 			.final = ahash_final,
4320 			.finup = ahash_finup,
4321 			.digest = ahash_digest,
4322 			.export = ahash_export,
4323 			.import = ahash_import,
4324 			.setkey = ahash_setkey,
4325 			.halg = {
4326 				.digestsize = SHA1_DIGEST_SIZE,
4327 				.statesize = sizeof(struct caam_export_state),
4328 			},
4329 		},
4330 		.alg_type = OP_ALG_ALGSEL_SHA1,
4331 	}, {
4332 		.name = "sha224",
4333 		.driver_name = "sha224-caam-qi2",
4334 		.hmac_name = "hmac(sha224)",
4335 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4336 		.blocksize = SHA224_BLOCK_SIZE,
4337 		.template_ahash = {
4338 			.init = ahash_init,
4339 			.update = ahash_update,
4340 			.final = ahash_final,
4341 			.finup = ahash_finup,
4342 			.digest = ahash_digest,
4343 			.export = ahash_export,
4344 			.import = ahash_import,
4345 			.setkey = ahash_setkey,
4346 			.halg = {
4347 				.digestsize = SHA224_DIGEST_SIZE,
4348 				.statesize = sizeof(struct caam_export_state),
4349 			},
4350 		},
4351 		.alg_type = OP_ALG_ALGSEL_SHA224,
4352 	}, {
4353 		.name = "sha256",
4354 		.driver_name = "sha256-caam-qi2",
4355 		.hmac_name = "hmac(sha256)",
4356 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4357 		.blocksize = SHA256_BLOCK_SIZE,
4358 		.template_ahash = {
4359 			.init = ahash_init,
4360 			.update = ahash_update,
4361 			.final = ahash_final,
4362 			.finup = ahash_finup,
4363 			.digest = ahash_digest,
4364 			.export = ahash_export,
4365 			.import = ahash_import,
4366 			.setkey = ahash_setkey,
4367 			.halg = {
4368 				.digestsize = SHA256_DIGEST_SIZE,
4369 				.statesize = sizeof(struct caam_export_state),
4370 			},
4371 		},
4372 		.alg_type = OP_ALG_ALGSEL_SHA256,
4373 	}, {
4374 		.name = "sha384",
4375 		.driver_name = "sha384-caam-qi2",
4376 		.hmac_name = "hmac(sha384)",
4377 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4378 		.blocksize = SHA384_BLOCK_SIZE,
4379 		.template_ahash = {
4380 			.init = ahash_init,
4381 			.update = ahash_update,
4382 			.final = ahash_final,
4383 			.finup = ahash_finup,
4384 			.digest = ahash_digest,
4385 			.export = ahash_export,
4386 			.import = ahash_import,
4387 			.setkey = ahash_setkey,
4388 			.halg = {
4389 				.digestsize = SHA384_DIGEST_SIZE,
4390 				.statesize = sizeof(struct caam_export_state),
4391 			},
4392 		},
4393 		.alg_type = OP_ALG_ALGSEL_SHA384,
4394 	}, {
4395 		.name = "sha512",
4396 		.driver_name = "sha512-caam-qi2",
4397 		.hmac_name = "hmac(sha512)",
4398 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4399 		.blocksize = SHA512_BLOCK_SIZE,
4400 		.template_ahash = {
4401 			.init = ahash_init,
4402 			.update = ahash_update,
4403 			.final = ahash_final,
4404 			.finup = ahash_finup,
4405 			.digest = ahash_digest,
4406 			.export = ahash_export,
4407 			.import = ahash_import,
4408 			.setkey = ahash_setkey,
4409 			.halg = {
4410 				.digestsize = SHA512_DIGEST_SIZE,
4411 				.statesize = sizeof(struct caam_export_state),
4412 			},
4413 		},
4414 		.alg_type = OP_ALG_ALGSEL_SHA512,
4415 	}, {
4416 		.name = "md5",
4417 		.driver_name = "md5-caam-qi2",
4418 		.hmac_name = "hmac(md5)",
4419 		.hmac_driver_name = "hmac-md5-caam-qi2",
4420 		.blocksize = MD5_BLOCK_WORDS * 4,
4421 		.template_ahash = {
4422 			.init = ahash_init,
4423 			.update = ahash_update,
4424 			.final = ahash_final,
4425 			.finup = ahash_finup,
4426 			.digest = ahash_digest,
4427 			.export = ahash_export,
4428 			.import = ahash_import,
4429 			.setkey = ahash_setkey,
4430 			.halg = {
4431 				.digestsize = MD5_DIGEST_SIZE,
4432 				.statesize = sizeof(struct caam_export_state),
4433 			},
4434 		},
4435 		.alg_type = OP_ALG_ALGSEL_MD5,
4436 	}
4437 };
4438 
4439 struct caam_hash_alg {
4440 	struct list_head entry;
4441 	struct device *dev;
4442 	int alg_type;
4443 	struct ahash_alg ahash_alg;
4444 };
4445 
4446 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4447 {
4448 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4449 	struct crypto_alg *base = tfm->__crt_alg;
4450 	struct hash_alg_common *halg =
4451 		 container_of(base, struct hash_alg_common, base);
4452 	struct ahash_alg *alg =
4453 		 container_of(halg, struct ahash_alg, halg);
4454 	struct caam_hash_alg *caam_hash =
4455 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4456 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4457 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4458 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4459 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4460 					 HASH_MSG_LEN + 32,
4461 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4462 					 HASH_MSG_LEN + 64,
4463 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4464 	dma_addr_t dma_addr;
4465 	int i;
4466 
4467 	ctx->dev = caam_hash->dev;
4468 
4469 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4470 					DMA_BIDIRECTIONAL,
4471 					DMA_ATTR_SKIP_CPU_SYNC);
4472 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4473 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4474 		return -ENOMEM;
4475 	}
4476 
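	/* Flow contexts share one mapping; compute per-operation addresses */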
4477 	for (i = 0; i < HASH_NUM_OP; i++)
4478 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4479 
4480 	/* copy descriptor header template value */
4481 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4482 
4483 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4484 				   OP_ALG_ALGSEL_SUBMASK) >>
4485 				  OP_ALG_ALGSEL_SHIFT];
4486 
4487 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4488 				 sizeof(struct caam_hash_state));
4489 
4490 	return ahash_set_sh_desc(ahash);
4491 }
4492 
4493 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4494 {
4495 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4496 
4497 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4498 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4499 }
4500 
4501 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4502 	struct caam_hash_template *template, bool keyed)
4503 {
4504 	struct caam_hash_alg *t_alg;
4505 	struct ahash_alg *halg;
4506 	struct crypto_alg *alg;
4507 
4508 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4509 	if (!t_alg)
4510 		return ERR_PTR(-ENOMEM);
4511 
4512 	t_alg->ahash_alg = template->template_ahash;
4513 	halg = &t_alg->ahash_alg;
4514 	alg = &halg->halg.base;
4515 
4516 	if (keyed) {
4517 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4518 			 template->hmac_name);
4519 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4520 			 template->hmac_driver_name);
4521 	} else {
4522 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4523 			 template->name);
4524 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4525 			 template->driver_name);
4526 		t_alg->ahash_alg.setkey = NULL;
4527 	}
4528 	alg->cra_module = THIS_MODULE;
4529 	alg->cra_init = caam_hash_cra_init;
4530 	alg->cra_exit = caam_hash_cra_exit;
4531 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4532 	alg->cra_priority = CAAM_CRA_PRIORITY;
4533 	alg->cra_blocksize = template->blocksize;
4534 	alg->cra_alignmask = 0;
4535 	alg->cra_flags = CRYPTO_ALG_ASYNC;
4536 
4537 	t_alg->alg_type = template->alg_type;
4538 	t_alg->dev = dev;
4539 
4540 	return t_alg;
4541 }
4542 
4543 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4544 {
4545 	struct dpaa2_caam_priv_per_cpu *ppriv;
4546 
4547 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4548 	napi_schedule_irqoff(&ppriv->napi);
4549 }
4550 
4551 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4552 {
4553 	struct device *dev = priv->dev;
4554 	struct dpaa2_io_notification_ctx *nctx;
4555 	struct dpaa2_caam_priv_per_cpu *ppriv;
4556 	int err, i = 0, cpu;
4557 
4558 	for_each_online_cpu(cpu) {
4559 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4560 		ppriv->priv = priv;
4561 		nctx = &ppriv->nctx;
4562 		nctx->is_cdan = 0;
4563 		nctx->id = ppriv->rsp_fqid;
4564 		nctx->desired_cpu = cpu;
4565 		nctx->cb = dpaa2_caam_fqdan_cb;
4566 
4567 		/* Register notification callbacks */
4568 		ppriv->dpio = dpaa2_io_service_select(cpu);
4569 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4570 		if (unlikely(err)) {
4571 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4572 			nctx->cb = NULL;
			/*
			 * If there is no affine DPIO for this core, there is
			 * probably none available for the next cores either.
			 * Signal that we want to retry later, in case the
			 * DPIO devices weren't probed yet.
			 */
4579 			err = -EPROBE_DEFER;
4580 			goto err;
4581 		}
4582 
4583 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4584 						     dev);
4585 		if (unlikely(!ppriv->store)) {
4586 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4587 			err = -ENOMEM;
4588 			goto err;
4589 		}
4590 
4591 		if (++i == priv->num_pairs)
4592 			break;
4593 	}
4594 
4595 	return 0;
4596 
4597 err:
4598 	for_each_online_cpu(cpu) {
4599 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4600 		if (!ppriv->nctx.cb)
4601 			break;
4602 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4603 	}
4604 
4605 	for_each_online_cpu(cpu) {
4606 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4607 		if (!ppriv->store)
4608 			break;
4609 		dpaa2_io_store_destroy(ppriv->store);
4610 	}
4611 
4612 	return err;
4613 }
4614 
4615 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4616 {
4617 	struct dpaa2_caam_priv_per_cpu *ppriv;
4618 	int i = 0, cpu;
4619 
4620 	for_each_online_cpu(cpu) {
4621 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4622 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4623 					    priv->dev);
4624 		dpaa2_io_store_destroy(ppriv->store);
4625 
4626 		if (++i == priv->num_pairs)
4627 			return;
4628 	}
4629 }
4630 
4631 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4632 {
4633 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4634 	struct device *dev = priv->dev;
4635 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4636 	struct dpaa2_caam_priv_per_cpu *ppriv;
4637 	int err = 0, i = 0, cpu;
4638 
4639 	/* Configure Rx queues */
4640 	for_each_online_cpu(cpu) {
4641 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4642 
4643 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4644 				       DPSECI_QUEUE_OPT_USER_CTX;
4645 		rx_queue_cfg.order_preservation_en = 0;
4646 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4647 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4648 		/*
4649 		 * Rx priority (WQ) doesn't really matter, since we use
4650 		 * pull mode, i.e. volatile dequeues from specific FQs
4651 		 */
4652 		rx_queue_cfg.dest_cfg.priority = 0;
4653 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4654 
4655 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4656 					  &rx_queue_cfg);
4657 		if (err) {
4658 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4659 				err);
4660 			return err;
4661 		}
4662 
4663 		if (++i == priv->num_pairs)
4664 			break;
4665 	}
4666 
4667 	return err;
4668 }
4669 
4670 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4671 {
4672 	struct device *dev = priv->dev;
4673 
4674 	if (!priv->cscn_mem)
4675 		return;
4676 
4677 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4678 	kfree(priv->cscn_mem);
4679 }
4680 
4681 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4682 {
4683 	struct device *dev = priv->dev;
4684 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4685 
4686 	dpaa2_dpseci_congestion_free(priv);
4687 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4688 }
4689 
4690 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4691 				  const struct dpaa2_fd *fd)
4692 {
4693 	struct caam_request *req;
4694 	u32 fd_err;
4695 
4696 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4697 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4698 		return;
4699 	}
4700 
4701 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4702 	if (unlikely(fd_err))
4703 		dev_err(priv->dev, "FD error: %08x\n", fd_err);
4704 
4705 	/*
4706 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4707 	 * in FD[ERR] or FD[FRC].
4708 	 */
4709 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4710 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4711 			 DMA_BIDIRECTIONAL);
4712 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4713 }
4714 
4715 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4716 {
4717 	int err;
4718 
4719 	/* Retry while portal is busy */
4720 	do {
4721 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4722 					       ppriv->store);
4723 	} while (err == -EBUSY);
4724 
4725 	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4727 
4728 	return err;
4729 }
4730 
4731 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4732 {
4733 	struct dpaa2_dq *dq;
4734 	int cleaned = 0, is_last;
4735 
4736 	do {
4737 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4738 		if (unlikely(!dq)) {
4739 			if (unlikely(!is_last)) {
4740 				dev_dbg(ppriv->priv->dev,
4741 					"FQ %d returned no valid frames\n",
4742 					ppriv->rsp_fqid);
4743 				/*
4744 				 * MUST retry until we get some sort of
4745 				 * valid response token (be it "empty dequeue"
4746 				 * or a valid frame).
4747 				 */
4748 				continue;
4749 			}
4750 			break;
4751 		}
4752 
4753 		/* Process FD */
4754 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4755 		cleaned++;
4756 	} while (!is_last);
4757 
4758 	return cleaned;
4759 }
4760 
4761 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4762 {
4763 	struct dpaa2_caam_priv_per_cpu *ppriv;
4764 	struct dpaa2_caam_priv *priv;
4765 	int err, cleaned = 0, store_cleaned;
4766 
4767 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4768 	priv = ppriv->priv;
4769 
4770 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4771 		return 0;
4772 
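	/*
	 * Consume frames in store-sized batches, stopping once the store
	 * comes back empty or the NAPI budget would be exceeded.
	 */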
4773 	do {
4774 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4775 		cleaned += store_cleaned;
4776 
4777 		if (store_cleaned == 0 ||
4778 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4779 			break;
4780 
4781 		/* Try to dequeue some more */
4782 		err = dpaa2_caam_pull_fq(ppriv);
4783 		if (unlikely(err))
4784 			break;
4785 	} while (1);
4786 
4787 	if (cleaned < budget) {
4788 		napi_complete_done(napi, cleaned);
4789 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4790 		if (unlikely(err))
4791 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4792 				err);
4793 	}
4794 
4795 	return cleaned;
4796 }
4797 
4798 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4799 					 u16 token)
4800 {
4801 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4802 	struct device *dev = priv->dev;
4803 	int err;
4804 
	/*
	 * The congestion group feature is supported starting with DPSECI API
	 * v5.1, and only when the object has been created with this
	 * capability.
	 */
4809 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4810 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4811 		return 0;
4812 
4813 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4814 				 GFP_KERNEL | GFP_DMA);
4815 	if (!priv->cscn_mem)
4816 		return -ENOMEM;
4817 
4818 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4819 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4820 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4821 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4822 		dev_err(dev, "Error mapping CSCN memory area\n");
4823 		err = -ENOMEM;
4824 		goto err_dma_map;
4825 	}
4826 
4827 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4828 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4829 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4830 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4831 	cong_notif_cfg.message_iova = priv->cscn_dma;
4832 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4833 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4834 					DPSECI_CGN_MODE_COHERENT_WRITE;
4835 
4836 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4837 						 &cong_notif_cfg);
4838 	if (err) {
4839 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4840 		goto err_set_cong;
4841 	}
4842 
4843 	return 0;
4844 
4845 err_set_cong:
4846 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4847 err_dma_map:
4848 	kfree(priv->cscn_mem);
4849 
4850 	return err;
4851 }
4852 
4853 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4854 {
4855 	struct device *dev = &ls_dev->dev;
4856 	struct dpaa2_caam_priv *priv;
4857 	struct dpaa2_caam_priv_per_cpu *ppriv;
4858 	int err, cpu;
4859 	u8 i;
4860 
4861 	priv = dev_get_drvdata(dev);
4862 
4863 	priv->dev = dev;
4864 	priv->dpsec_id = ls_dev->obj_desc.id;
4865 
	/* Get a handle for the DPSECI this interface is associated with */
4867 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4868 	if (err) {
4869 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4870 		goto err_open;
4871 	}
4872 
4873 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4874 				     &priv->minor_ver);
4875 	if (err) {
4876 		dev_err(dev, "dpseci_get_api_version() failed\n");
4877 		goto err_get_vers;
4878 	}
4879 
4880 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4881 
4882 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4883 				    &priv->dpseci_attr);
4884 	if (err) {
4885 		dev_err(dev, "dpseci_get_attributes() failed\n");
4886 		goto err_get_vers;
4887 	}
4888 
4889 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4890 				  &priv->sec_attr);
4891 	if (err) {
4892 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4893 		goto err_get_vers;
4894 	}
4895 
4896 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4897 	if (err) {
4898 		dev_err(dev, "setup_congestion() failed\n");
4899 		goto err_get_vers;
4900 	}
4901 
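	/* Use at most one Rx/Tx queue pair per online CPU */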
4902 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4903 			      priv->dpseci_attr.num_tx_queues);
4904 	if (priv->num_pairs > num_online_cpus()) {
4905 		dev_warn(dev, "%d queues won't be used\n",
4906 			 priv->num_pairs - num_online_cpus());
4907 		priv->num_pairs = num_online_cpus();
4908 	}
4909 
4910 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4911 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4912 					  &priv->rx_queue_attr[i]);
4913 		if (err) {
4914 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4915 			goto err_get_rx_queue;
4916 		}
4917 	}
4918 
4919 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4920 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4921 					  &priv->tx_queue_attr[i]);
4922 		if (err) {
4923 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4924 			goto err_get_rx_queue;
4925 		}
4926 	}
4927 
4928 	i = 0;
4929 	for_each_online_cpu(cpu) {
4930 		u8 j;
4931 
4932 		j = i % priv->num_pairs;
4933 
4934 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4935 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4936 
4937 		/*
4938 		 * Allow all cores to enqueue, while only some of them
4939 		 * will take part in dequeuing.
4940 		 */
4941 		if (++i > priv->num_pairs)
4942 			continue;
4943 
4944 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4945 		ppriv->prio = j;
4946 
4947 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4948 			priv->rx_queue_attr[j].fqid,
4949 			priv->tx_queue_attr[j].fqid);
4950 
4951 		ppriv->net_dev.dev = *dev;
4952 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4953 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4954 			       DPAA2_CAAM_NAPI_WEIGHT);
4955 	}
4956 
4957 	return 0;
4958 
4959 err_get_rx_queue:
4960 	dpaa2_dpseci_congestion_free(priv);
4961 err_get_vers:
4962 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4963 err_open:
4964 	return err;
4965 }
4966 
4967 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4968 {
4969 	struct device *dev = priv->dev;
4970 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4971 	struct dpaa2_caam_priv_per_cpu *ppriv;
4972 	int i;
4973 
4974 	for (i = 0; i < priv->num_pairs; i++) {
4975 		ppriv = per_cpu_ptr(priv->ppriv, i);
4976 		napi_enable(&ppriv->napi);
4977 	}
4978 
4979 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4980 }
4981 
4982 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4983 {
4984 	struct device *dev = priv->dev;
4985 	struct dpaa2_caam_priv_per_cpu *ppriv;
4986 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4987 	int i, err = 0, enabled;
4988 
4989 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4990 	if (err) {
4991 		dev_err(dev, "dpseci_disable() failed\n");
4992 		return err;
4993 	}
4994 
4995 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4996 	if (err) {
4997 		dev_err(dev, "dpseci_is_enabled() failed\n");
4998 		return err;
4999 	}
5000 
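	/* "enabled" still set means the disable did not take effect */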
5001 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5002 
5003 	for (i = 0; i < priv->num_pairs; i++) {
5004 		ppriv = per_cpu_ptr(priv->ppriv, i);
5005 		napi_disable(&ppriv->napi);
5006 		netif_napi_del(&ppriv->napi);
5007 	}
5008 
5009 	return 0;
5010 }
5011 
5012 static struct list_head hash_list;
5013 
5014 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5015 {
5016 	struct device *dev;
5017 	struct dpaa2_caam_priv *priv;
5018 	int i, err = 0;
5019 	bool registered = false;
5020 
5021 	/*
5022 	 * There is no way to get CAAM endianness - there is no direct register
5023 	 * space access and MC f/w does not provide this attribute.
5024 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5025 	 * property.
5026 	 */
5027 	caam_little_end = true;
5028 
5029 	caam_imx = false;
5030 
5031 	dev = &dpseci_dev->dev;
5032 
5033 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5034 	if (!priv)
5035 		return -ENOMEM;
5036 
5037 	dev_set_drvdata(dev, priv);
5038 
5039 	priv->domain = iommu_get_domain_for_dev(dev);
5040 
5041 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5042 				     0, SLAB_CACHE_DMA, NULL);
5043 	if (!qi_cache) {
5044 		dev_err(dev, "Can't allocate SEC cache\n");
5045 		return -ENOMEM;
5046 	}
5047 
5048 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5049 	if (err) {
5050 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5051 		goto err_dma_mask;
5052 	}
5053 
5054 	/* Obtain a MC portal */
5055 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5056 	if (err) {
5057 		if (err == -ENXIO)
5058 			err = -EPROBE_DEFER;
5059 		else
5060 			dev_err(dev, "MC portal allocation failed\n");
5061 
5062 		goto err_dma_mask;
5063 	}
5064 
5065 	priv->ppriv = alloc_percpu(*priv->ppriv);
5066 	if (!priv->ppriv) {
5067 		dev_err(dev, "alloc_percpu() failed\n");
5068 		err = -ENOMEM;
5069 		goto err_alloc_ppriv;
5070 	}
5071 
5072 	/* DPSECI initialization */
5073 	err = dpaa2_dpseci_setup(dpseci_dev);
5074 	if (err) {
5075 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5076 		goto err_dpseci_setup;
5077 	}
5078 
5079 	/* DPIO */
5080 	err = dpaa2_dpseci_dpio_setup(priv);
5081 	if (err) {
5082 		if (err != -EPROBE_DEFER)
5083 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5084 		goto err_dpio_setup;
5085 	}
5086 
5087 	/* DPSECI binding to DPIO */
5088 	err = dpaa2_dpseci_bind(priv);
5089 	if (err) {
5090 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5091 		goto err_bind;
5092 	}
5093 
5094 	/* DPSECI enable */
5095 	err = dpaa2_dpseci_enable(priv);
5096 	if (err) {
5097 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5098 		goto err_bind;
5099 	}
5100 
5101 	/* register crypto algorithms the device supports */
5102 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5103 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5104 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5105 
5106 		/* Skip DES algorithms if not supported by device */
5107 		if (!priv->sec_attr.des_acc_num &&
5108 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5109 		     alg_sel == OP_ALG_ALGSEL_DES))
5110 			continue;
5111 
5112 		/* Skip AES algorithms if not supported by device */
5113 		if (!priv->sec_attr.aes_acc_num &&
5114 		    alg_sel == OP_ALG_ALGSEL_AES)
5115 			continue;
5116 
5117 		/* Skip CHACHA20 algorithms if not supported by device */
5118 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5119 		    !priv->sec_attr.ccha_acc_num)
5120 			continue;
5121 
5122 		t_alg->caam.dev = dev;
5123 		caam_skcipher_alg_init(t_alg);
5124 
5125 		err = crypto_register_skcipher(&t_alg->skcipher);
5126 		if (err) {
5127 			dev_warn(dev, "%s alg registration failed: %d\n",
5128 				 t_alg->skcipher.base.cra_driver_name, err);
5129 			continue;
5130 		}
5131 
5132 		t_alg->registered = true;
5133 		registered = true;
5134 	}
5135 
5136 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5137 		struct caam_aead_alg *t_alg = driver_aeads + i;
5138 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5139 				 OP_ALG_ALGSEL_MASK;
5140 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5141 				 OP_ALG_ALGSEL_MASK;
5142 
5143 		/* Skip DES algorithms if not supported by device */
5144 		if (!priv->sec_attr.des_acc_num &&
5145 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5146 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5147 			continue;
5148 
5149 		/* Skip AES algorithms if not supported by device */
5150 		if (!priv->sec_attr.aes_acc_num &&
5151 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5152 			continue;
5153 
5154 		/* Skip CHACHA20 algorithms if not supported by device */
5155 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5156 		    !priv->sec_attr.ccha_acc_num)
5157 			continue;
5158 
5159 		/* Skip POLY1305 algorithms if not supported by device */
5160 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5161 		    !priv->sec_attr.ptha_acc_num)
5162 			continue;
5163 
5164 		/*
5165 		 * Skip algorithms requiring message digests
5166 		 * if MD not supported by device.
5167 		 */
5168 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5169 		    !priv->sec_attr.md_acc_num)
5170 			continue;
5171 
5172 		t_alg->caam.dev = dev;
5173 		caam_aead_alg_init(t_alg);
5174 
5175 		err = crypto_register_aead(&t_alg->aead);
5176 		if (err) {
5177 			dev_warn(dev, "%s alg registration failed: %d\n",
5178 				 t_alg->aead.base.cra_driver_name, err);
5179 			continue;
5180 		}
5181 
5182 		t_alg->registered = true;
5183 		registered = true;
5184 	}
5185 	if (registered)
5186 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5187 
5188 	/* register hash algorithms the device supports */
5189 	INIT_LIST_HEAD(&hash_list);
5190 
5191 	/*
5192 	 * Skip registration of any hashing algorithms if MD block
5193 	 * is not present.
5194 	 */
5195 	if (!priv->sec_attr.md_acc_num)
5196 		return 0;
5197 
5198 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5199 		struct caam_hash_alg *t_alg;
5200 		struct caam_hash_template *alg = driver_hash + i;
5201 
5202 		/* register hmac version */
5203 		t_alg = caam_hash_alloc(dev, alg, true);
5204 		if (IS_ERR(t_alg)) {
5205 			err = PTR_ERR(t_alg);
5206 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5207 				 alg->driver_name, err);
5208 			continue;
5209 		}
5210 
5211 		err = crypto_register_ahash(&t_alg->ahash_alg);
5212 		if (err) {
5213 			dev_warn(dev, "%s alg registration failed: %d\n",
5214 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5215 				 err);
5216 			kfree(t_alg);
5217 		} else {
5218 			list_add_tail(&t_alg->entry, &hash_list);
5219 		}
5220 
5221 		/* register unkeyed version */
5222 		t_alg = caam_hash_alloc(dev, alg, false);
5223 		if (IS_ERR(t_alg)) {
5224 			err = PTR_ERR(t_alg);
5225 			dev_warn(dev, "%s alg allocation failed: %d\n",
5226 				 alg->driver_name, err);
5227 			continue;
5228 		}
5229 
5230 		err = crypto_register_ahash(&t_alg->ahash_alg);
5231 		if (err) {
5232 			dev_warn(dev, "%s alg registration failed: %d\n",
5233 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5234 				 err);
5235 			kfree(t_alg);
5236 		} else {
5237 			list_add_tail(&t_alg->entry, &hash_list);
5238 		}
5239 	}
5240 	if (!list_empty(&hash_list))
5241 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5242 
5243 	return err;
5244 
5245 err_bind:
5246 	dpaa2_dpseci_dpio_free(priv);
5247 err_dpio_setup:
5248 	dpaa2_dpseci_free(priv);
5249 err_dpseci_setup:
5250 	free_percpu(priv->ppriv);
5251 err_alloc_ppriv:
5252 	fsl_mc_portal_free(priv->mc_io);
5253 err_dma_mask:
5254 	kmem_cache_destroy(qi_cache);
5255 
5256 	return err;
5257 }
5258 
5259 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5260 {
5261 	struct device *dev;
5262 	struct dpaa2_caam_priv *priv;
5263 	int i;
5264 
5265 	dev = &ls_dev->dev;
5266 	priv = dev_get_drvdata(dev);
5267 
5268 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5269 		struct caam_aead_alg *t_alg = driver_aeads + i;
5270 
5271 		if (t_alg->registered)
5272 			crypto_unregister_aead(&t_alg->aead);
5273 	}
5274 
5275 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5276 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5277 
5278 		if (t_alg->registered)
5279 			crypto_unregister_skcipher(&t_alg->skcipher);
5280 	}
5281 
5282 	if (hash_list.next) {
5283 		struct caam_hash_alg *t_hash_alg, *p;
5284 
5285 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5286 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5287 			list_del(&t_hash_alg->entry);
5288 			kfree(t_hash_alg);
5289 		}
5290 	}
5291 
5292 	dpaa2_dpseci_disable(priv);
5293 	dpaa2_dpseci_dpio_free(priv);
5294 	dpaa2_dpseci_free(priv);
5295 	free_percpu(priv->ppriv);
5296 	fsl_mc_portal_free(priv->mc_io);
5297 	kmem_cache_destroy(qi_cache);
5298 
5299 	return 0;
5300 }
5301 
5302 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5303 {
5304 	struct dpaa2_fd fd;
5305 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5306 	struct dpaa2_caam_priv_per_cpu *ppriv;
5307 	int err = 0, i;
5308 
5309 	if (IS_ERR(req))
5310 		return PTR_ERR(req);
5311 
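	/*
	 * If congestion notifications are enabled, check the CSCN and reject
	 * new requests while the congestion group is congested.
	 */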
5312 	if (priv->cscn_mem) {
5313 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5314 					DPAA2_CSCN_SIZE,
5315 					DMA_FROM_DEVICE);
5316 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5317 			dev_dbg_ratelimited(dev, "Dropping request\n");
5318 			return -EBUSY;
5319 		}
5320 	}
5321 
5322 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5323 
5324 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5325 					 DMA_BIDIRECTIONAL);
5326 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5327 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5328 		goto err_out;
5329 	}
5330 
5331 	memset(&fd, 0, sizeof(fd));
5332 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5333 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5334 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5335 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5336 
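	/*
	 * Enqueue on this CPU's request FQ, retrying a bounded number of
	 * times while the QBMan portal is busy.
	 */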
5337 	ppriv = this_cpu_ptr(priv->ppriv);
5338 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5339 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5340 						  &fd);
5341 		if (err != -EBUSY)
5342 			break;
5343 
5344 		cpu_relax();
5345 	}
5346 
5347 	if (unlikely(err)) {
5348 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5349 		goto err_out;
5350 	}
5351 
5352 	return -EINPROGRESS;
5353 
5354 err_out:
5355 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5356 			 DMA_BIDIRECTIONAL);
5357 	return -EIO;
5358 }
5359 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5360 
5361 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5362 	{
5363 		.vendor = FSL_MC_VENDOR_FREESCALE,
5364 		.obj_type = "dpseci",
5365 	},
5366 	{ .vendor = 0x0 }
5367 };
5368 
5369 static struct fsl_mc_driver dpaa2_caam_driver = {
5370 	.driver = {
5371 		.name		= KBUILD_MODNAME,
5372 		.owner		= THIS_MODULE,
5373 	},
5374 	.probe		= dpaa2_caam_probe,
5375 	.remove		= dpaa2_caam_remove,
5376 	.match_id_table = dpaa2_caam_match_id_table
5377 };
5378 
5379 MODULE_LICENSE("Dual BSD/GPL");
5380 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5381 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5382 
5383 module_fsl_mc_driver(dpaa2_caam_driver);
5384