1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2018 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include <linux/fsl/mc.h>
19 #include <soc/fsl/dpaa2-io.h>
20 #include <soc/fsl/dpaa2-fd.h>
21 
22 #define CAAM_CRA_PRIORITY	2000
23 
24 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
25 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 				 SHA512_DIGEST_SIZE * 2)
27 
28 #if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
29 bool caam_little_end;
30 EXPORT_SYMBOL(caam_little_end);
31 bool caam_imx;
32 EXPORT_SYMBOL(caam_imx);
33 #endif
34 
35 /*
36  * This is a a cache of buffers, from which the users of CAAM QI driver
37  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
38  * NOTE: A more elegant solution would be to have some headroom in the frames
39  *       being processed. This can be added by the dpaa2-eth driver. This would
40  *       pose a problem for userspace application processing which cannot
41  *       know of this limitation. So for now, this will work.
42  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
43  */
44 static struct kmem_cache *qi_cache;
45 
46 struct caam_alg_entry {
47 	struct device *dev;
48 	int class1_alg_type;
49 	int class2_alg_type;
50 	bool rfc3686;
51 	bool geniv;
52 };
53 
54 struct caam_aead_alg {
55 	struct aead_alg aead;
56 	struct caam_alg_entry caam;
57 	bool registered;
58 };
59 
60 struct caam_skcipher_alg {
61 	struct skcipher_alg skcipher;
62 	struct caam_alg_entry caam;
63 	bool registered;
64 };
65 
66 /**
67  * caam_ctx - per-session context
68  * @flc: Flow Contexts array
69  * @key:  [authentication key], encryption key
70  * @flc_dma: I/O virtual addresses of the Flow Contexts
71  * @key_dma: I/O virtual address of the key
72  * @dir: DMA direction for mapping key and Flow Contexts
73  * @dev: dpseci device
74  * @adata: authentication algorithm details
75  * @cdata: encryption algorithm details
76  * @authsize: authentication tag (a.k.a. ICV / MAC) size
77  */
78 struct caam_ctx {
79 	struct caam_flc flc[NUM_OP];
80 	u8 key[CAAM_MAX_KEY_SIZE];
81 	dma_addr_t flc_dma[NUM_OP];
82 	dma_addr_t key_dma;
83 	enum dma_data_direction dir;
84 	struct device *dev;
85 	struct alginfo adata;
86 	struct alginfo cdata;
87 	unsigned int authsize;
88 };
89 
90 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
91 				     dma_addr_t iova_addr)
92 {
93 	phys_addr_t phys_addr;
94 
95 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
96 				   iova_addr;
97 
98 	return phys_to_virt(phys_addr);
99 }
100 
101 /*
102  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
103  *
104  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
105  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
106  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
107  * hosting 16 SG entries.
108  *
109  * @flags - flags that would be used for the equivalent kmalloc(..) call
110  *
111  * Returns a pointer to a retrieved buffer on success or NULL on failure.
112  */
113 static inline void *qi_cache_zalloc(gfp_t flags)
114 {
115 	return kmem_cache_zalloc(qi_cache, flags);
116 }
117 
118 /*
119  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
120  *
121  * @obj - buffer previously allocated by qi_cache_zalloc
122  *
123  * No checking is being done, the call is a passthrough call to
124  * kmem_cache_free(...)
125  */
126 static inline void qi_cache_free(void *obj)
127 {
128 	kmem_cache_free(qi_cache, obj);
129 }
130 
131 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
132 {
133 	switch (crypto_tfm_alg_type(areq->tfm)) {
134 	case CRYPTO_ALG_TYPE_SKCIPHER:
135 		return skcipher_request_ctx(skcipher_request_cast(areq));
136 	case CRYPTO_ALG_TYPE_AEAD:
137 		return aead_request_ctx(container_of(areq, struct aead_request,
138 						     base));
139 	case CRYPTO_ALG_TYPE_AHASH:
140 		return ahash_request_ctx(ahash_request_cast(areq));
141 	default:
142 		return ERR_PTR(-EINVAL);
143 	}
144 }
145 
146 static void caam_unmap(struct device *dev, struct scatterlist *src,
147 		       struct scatterlist *dst, int src_nents,
148 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
149 		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
150 {
151 	if (dst != src) {
152 		if (src_nents)
153 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
154 		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
155 	} else {
156 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
157 	}
158 
159 	if (iv_dma)
160 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
161 
162 	if (qm_sg_bytes)
163 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
164 }
165 
166 static int aead_set_sh_desc(struct crypto_aead *aead)
167 {
168 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
169 						 typeof(*alg), aead);
170 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
171 	unsigned int ivsize = crypto_aead_ivsize(aead);
172 	struct device *dev = ctx->dev;
173 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
174 	struct caam_flc *flc;
175 	u32 *desc;
176 	u32 ctx1_iv_off = 0;
177 	u32 *nonce = NULL;
178 	unsigned int data_len[2];
179 	u32 inl_mask;
180 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
181 			       OP_ALG_AAI_CTR_MOD128);
182 	const bool is_rfc3686 = alg->caam.rfc3686;
183 
184 	if (!ctx->cdata.keylen || !ctx->authsize)
185 		return 0;
186 
187 	/*
188 	 * AES-CTR needs to load IV in CONTEXT1 reg
189 	 * at an offset of 128bits (16bytes)
190 	 * CONTEXT1[255:128] = IV
191 	 */
192 	if (ctr_mode)
193 		ctx1_iv_off = 16;
194 
195 	/*
196 	 * RFC3686 specific:
197 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
198 	 */
199 	if (is_rfc3686) {
200 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
201 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
202 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
203 	}
204 
205 	data_len[0] = ctx->adata.keylen_pad;
206 	data_len[1] = ctx->cdata.keylen;
207 
208 	/* aead_encrypt shared descriptor */
209 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
210 						 DESC_QI_AEAD_ENC_LEN) +
211 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
212 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
213 			      ARRAY_SIZE(data_len)) < 0)
214 		return -EINVAL;
215 
216 	if (inl_mask & 1)
217 		ctx->adata.key_virt = ctx->key;
218 	else
219 		ctx->adata.key_dma = ctx->key_dma;
220 
221 	if (inl_mask & 2)
222 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
223 	else
224 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
225 
226 	ctx->adata.key_inline = !!(inl_mask & 1);
227 	ctx->cdata.key_inline = !!(inl_mask & 2);
228 
229 	flc = &ctx->flc[ENCRYPT];
230 	desc = flc->sh_desc;
231 
232 	if (alg->caam.geniv)
233 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
234 					  ivsize, ctx->authsize, is_rfc3686,
235 					  nonce, ctx1_iv_off, true,
236 					  priv->sec_attr.era);
237 	else
238 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
239 				       ivsize, ctx->authsize, is_rfc3686, nonce,
240 				       ctx1_iv_off, true, priv->sec_attr.era);
241 
242 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
243 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
244 				   sizeof(flc->flc) + desc_bytes(desc),
245 				   ctx->dir);
246 
247 	/* aead_decrypt shared descriptor */
248 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
249 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
250 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
251 			      ARRAY_SIZE(data_len)) < 0)
252 		return -EINVAL;
253 
254 	if (inl_mask & 1)
255 		ctx->adata.key_virt = ctx->key;
256 	else
257 		ctx->adata.key_dma = ctx->key_dma;
258 
259 	if (inl_mask & 2)
260 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
261 	else
262 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
263 
264 	ctx->adata.key_inline = !!(inl_mask & 1);
265 	ctx->cdata.key_inline = !!(inl_mask & 2);
266 
267 	flc = &ctx->flc[DECRYPT];
268 	desc = flc->sh_desc;
269 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
270 			       ivsize, ctx->authsize, alg->caam.geniv,
271 			       is_rfc3686, nonce, ctx1_iv_off, true,
272 			       priv->sec_attr.era);
273 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
274 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
275 				   sizeof(flc->flc) + desc_bytes(desc),
276 				   ctx->dir);
277 
278 	return 0;
279 }
280 
281 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
282 {
283 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
284 
285 	ctx->authsize = authsize;
286 	aead_set_sh_desc(authenc);
287 
288 	return 0;
289 }
290 
291 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
292 		       unsigned int keylen)
293 {
294 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
295 	struct device *dev = ctx->dev;
296 	struct crypto_authenc_keys keys;
297 
298 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
299 		goto badkey;
300 
301 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
302 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
303 		keys.authkeylen);
304 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
305 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
306 
307 	ctx->adata.keylen = keys.authkeylen;
308 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
309 					      OP_ALG_ALGSEL_MASK);
310 
311 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
312 		goto badkey;
313 
314 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
315 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
316 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
317 				   keys.enckeylen, ctx->dir);
318 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
319 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
320 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
321 
322 	ctx->cdata.keylen = keys.enckeylen;
323 
324 	memzero_explicit(&keys, sizeof(keys));
325 	return aead_set_sh_desc(aead);
326 badkey:
327 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
328 	memzero_explicit(&keys, sizeof(keys));
329 	return -EINVAL;
330 }
331 
332 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
333 					   bool encrypt)
334 {
335 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
336 	struct caam_request *req_ctx = aead_request_ctx(req);
337 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
338 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
339 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
340 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
341 						 typeof(*alg), aead);
342 	struct device *dev = ctx->dev;
343 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
344 		      GFP_KERNEL : GFP_ATOMIC;
345 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
346 	struct aead_edesc *edesc;
347 	dma_addr_t qm_sg_dma, iv_dma = 0;
348 	int ivsize = 0;
349 	unsigned int authsize = ctx->authsize;
350 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
351 	int in_len, out_len;
352 	struct dpaa2_sg_entry *sg_table;
353 
354 	/* allocate space for base edesc, link tables and IV */
355 	edesc = qi_cache_zalloc(GFP_DMA | flags);
356 	if (unlikely(!edesc)) {
357 		dev_err(dev, "could not allocate extended descriptor\n");
358 		return ERR_PTR(-ENOMEM);
359 	}
360 
361 	if (unlikely(req->dst != req->src)) {
362 		src_nents = sg_nents_for_len(req->src, req->assoclen +
363 					     req->cryptlen);
364 		if (unlikely(src_nents < 0)) {
365 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
366 				req->assoclen + req->cryptlen);
367 			qi_cache_free(edesc);
368 			return ERR_PTR(src_nents);
369 		}
370 
371 		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
372 					     req->cryptlen +
373 					     (encrypt ? authsize :
374 							(-authsize)));
375 		if (unlikely(dst_nents < 0)) {
376 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
377 				req->assoclen + req->cryptlen +
378 				(encrypt ? authsize : (-authsize)));
379 			qi_cache_free(edesc);
380 			return ERR_PTR(dst_nents);
381 		}
382 
383 		if (src_nents) {
384 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
385 						      DMA_TO_DEVICE);
386 			if (unlikely(!mapped_src_nents)) {
387 				dev_err(dev, "unable to map source\n");
388 				qi_cache_free(edesc);
389 				return ERR_PTR(-ENOMEM);
390 			}
391 		} else {
392 			mapped_src_nents = 0;
393 		}
394 
395 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
396 					      DMA_FROM_DEVICE);
397 		if (unlikely(!mapped_dst_nents)) {
398 			dev_err(dev, "unable to map destination\n");
399 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
400 			qi_cache_free(edesc);
401 			return ERR_PTR(-ENOMEM);
402 		}
403 	} else {
404 		src_nents = sg_nents_for_len(req->src, req->assoclen +
405 					     req->cryptlen +
406 						(encrypt ? authsize : 0));
407 		if (unlikely(src_nents < 0)) {
408 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
409 				req->assoclen + req->cryptlen +
410 				(encrypt ? authsize : 0));
411 			qi_cache_free(edesc);
412 			return ERR_PTR(src_nents);
413 		}
414 
415 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
416 					      DMA_BIDIRECTIONAL);
417 		if (unlikely(!mapped_src_nents)) {
418 			dev_err(dev, "unable to map source\n");
419 			qi_cache_free(edesc);
420 			return ERR_PTR(-ENOMEM);
421 		}
422 	}
423 
424 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
425 		ivsize = crypto_aead_ivsize(aead);
426 
427 	/*
428 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
429 	 * Input is not contiguous.
430 	 */
431 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
432 		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
433 	sg_table = &edesc->sgt[0];
434 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
435 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
436 		     CAAM_QI_MEMCACHE_SIZE)) {
437 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
438 			qm_sg_nents, ivsize);
439 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
440 			   0, 0, 0);
441 		qi_cache_free(edesc);
442 		return ERR_PTR(-ENOMEM);
443 	}
444 
445 	if (ivsize) {
446 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
447 
448 		/* Make sure IV is located in a DMAable area */
449 		memcpy(iv, req->iv, ivsize);
450 
451 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
452 		if (dma_mapping_error(dev, iv_dma)) {
453 			dev_err(dev, "unable to map IV\n");
454 			caam_unmap(dev, req->src, req->dst, src_nents,
455 				   dst_nents, 0, 0, 0, 0);
456 			qi_cache_free(edesc);
457 			return ERR_PTR(-ENOMEM);
458 		}
459 	}
460 
461 	edesc->src_nents = src_nents;
462 	edesc->dst_nents = dst_nents;
463 	edesc->iv_dma = iv_dma;
464 
465 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
466 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
467 		/*
468 		 * The associated data comes already with the IV but we need
469 		 * to skip it when we authenticate or encrypt...
470 		 */
471 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
472 	else
473 		edesc->assoclen = cpu_to_caam32(req->assoclen);
474 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
475 					     DMA_TO_DEVICE);
476 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
477 		dev_err(dev, "unable to map assoclen\n");
478 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
479 			   iv_dma, ivsize, 0, 0);
480 		qi_cache_free(edesc);
481 		return ERR_PTR(-ENOMEM);
482 	}
483 
484 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
485 	qm_sg_index++;
486 	if (ivsize) {
487 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
488 		qm_sg_index++;
489 	}
490 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
491 	qm_sg_index += mapped_src_nents;
492 
493 	if (mapped_dst_nents > 1)
494 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
495 				 qm_sg_index, 0);
496 
497 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
498 	if (dma_mapping_error(dev, qm_sg_dma)) {
499 		dev_err(dev, "unable to map S/G table\n");
500 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
501 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
502 			   iv_dma, ivsize, 0, 0);
503 		qi_cache_free(edesc);
504 		return ERR_PTR(-ENOMEM);
505 	}
506 
507 	edesc->qm_sg_dma = qm_sg_dma;
508 	edesc->qm_sg_bytes = qm_sg_bytes;
509 
510 	out_len = req->assoclen + req->cryptlen +
511 		  (encrypt ? ctx->authsize : (-ctx->authsize));
512 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
513 
514 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
515 	dpaa2_fl_set_final(in_fle, true);
516 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
517 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
518 	dpaa2_fl_set_len(in_fle, in_len);
519 
520 	if (req->dst == req->src) {
521 		if (mapped_src_nents == 1) {
522 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
523 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
524 		} else {
525 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
526 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
527 					  (1 + !!ivsize) * sizeof(*sg_table));
528 		}
529 	} else if (mapped_dst_nents == 1) {
530 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
531 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
532 	} else {
533 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
534 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
535 				  sizeof(*sg_table));
536 	}
537 
538 	dpaa2_fl_set_len(out_fle, out_len);
539 
540 	return edesc;
541 }
542 
543 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
544 {
545 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
546 	unsigned int ivsize = crypto_aead_ivsize(aead);
547 	struct device *dev = ctx->dev;
548 	struct caam_flc *flc;
549 	u32 *desc;
550 
551 	if (!ctx->cdata.keylen || !ctx->authsize)
552 		return 0;
553 
554 	flc = &ctx->flc[ENCRYPT];
555 	desc = flc->sh_desc;
556 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
557 			       ctx->authsize, true, true);
558 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
559 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
560 				   sizeof(flc->flc) + desc_bytes(desc),
561 				   ctx->dir);
562 
563 	flc = &ctx->flc[DECRYPT];
564 	desc = flc->sh_desc;
565 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
566 			       ctx->authsize, false, true);
567 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
568 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
569 				   sizeof(flc->flc) + desc_bytes(desc),
570 				   ctx->dir);
571 
572 	return 0;
573 }
574 
575 static int chachapoly_setauthsize(struct crypto_aead *aead,
576 				  unsigned int authsize)
577 {
578 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
579 
580 	if (authsize != POLY1305_DIGEST_SIZE)
581 		return -EINVAL;
582 
583 	ctx->authsize = authsize;
584 	return chachapoly_set_sh_desc(aead);
585 }
586 
587 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
588 			     unsigned int keylen)
589 {
590 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
591 	unsigned int ivsize = crypto_aead_ivsize(aead);
592 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
593 
594 	if (keylen != CHACHA_KEY_SIZE + saltlen) {
595 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
596 		return -EINVAL;
597 	}
598 
599 	ctx->cdata.key_virt = key;
600 	ctx->cdata.keylen = keylen - saltlen;
601 
602 	return chachapoly_set_sh_desc(aead);
603 }
604 
605 static int gcm_set_sh_desc(struct crypto_aead *aead)
606 {
607 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
608 	struct device *dev = ctx->dev;
609 	unsigned int ivsize = crypto_aead_ivsize(aead);
610 	struct caam_flc *flc;
611 	u32 *desc;
612 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
613 			ctx->cdata.keylen;
614 
615 	if (!ctx->cdata.keylen || !ctx->authsize)
616 		return 0;
617 
618 	/*
619 	 * AES GCM encrypt shared descriptor
620 	 * Job Descriptor and Shared Descriptor
621 	 * must fit into the 64-word Descriptor h/w Buffer
622 	 */
623 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
624 		ctx->cdata.key_inline = true;
625 		ctx->cdata.key_virt = ctx->key;
626 	} else {
627 		ctx->cdata.key_inline = false;
628 		ctx->cdata.key_dma = ctx->key_dma;
629 	}
630 
631 	flc = &ctx->flc[ENCRYPT];
632 	desc = flc->sh_desc;
633 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
634 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
635 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
636 				   sizeof(flc->flc) + desc_bytes(desc),
637 				   ctx->dir);
638 
639 	/*
640 	 * Job Descriptor and Shared Descriptors
641 	 * must all fit into the 64-word Descriptor h/w Buffer
642 	 */
643 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
644 		ctx->cdata.key_inline = true;
645 		ctx->cdata.key_virt = ctx->key;
646 	} else {
647 		ctx->cdata.key_inline = false;
648 		ctx->cdata.key_dma = ctx->key_dma;
649 	}
650 
651 	flc = &ctx->flc[DECRYPT];
652 	desc = flc->sh_desc;
653 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
654 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
655 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
656 				   sizeof(flc->flc) + desc_bytes(desc),
657 				   ctx->dir);
658 
659 	return 0;
660 }
661 
662 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
663 {
664 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
665 
666 	ctx->authsize = authsize;
667 	gcm_set_sh_desc(authenc);
668 
669 	return 0;
670 }
671 
672 static int gcm_setkey(struct crypto_aead *aead,
673 		      const u8 *key, unsigned int keylen)
674 {
675 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
676 	struct device *dev = ctx->dev;
677 
678 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
679 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
680 
681 	memcpy(ctx->key, key, keylen);
682 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
683 	ctx->cdata.keylen = keylen;
684 
685 	return gcm_set_sh_desc(aead);
686 }
687 
688 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
689 {
690 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
691 	struct device *dev = ctx->dev;
692 	unsigned int ivsize = crypto_aead_ivsize(aead);
693 	struct caam_flc *flc;
694 	u32 *desc;
695 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
696 			ctx->cdata.keylen;
697 
698 	if (!ctx->cdata.keylen || !ctx->authsize)
699 		return 0;
700 
701 	ctx->cdata.key_virt = ctx->key;
702 
703 	/*
704 	 * RFC4106 encrypt shared descriptor
705 	 * Job Descriptor and Shared Descriptor
706 	 * must fit into the 64-word Descriptor h/w Buffer
707 	 */
708 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
709 		ctx->cdata.key_inline = true;
710 	} else {
711 		ctx->cdata.key_inline = false;
712 		ctx->cdata.key_dma = ctx->key_dma;
713 	}
714 
715 	flc = &ctx->flc[ENCRYPT];
716 	desc = flc->sh_desc;
717 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
718 				  true);
719 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
720 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
721 				   sizeof(flc->flc) + desc_bytes(desc),
722 				   ctx->dir);
723 
724 	/*
725 	 * Job Descriptor and Shared Descriptors
726 	 * must all fit into the 64-word Descriptor h/w Buffer
727 	 */
728 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
729 		ctx->cdata.key_inline = true;
730 	} else {
731 		ctx->cdata.key_inline = false;
732 		ctx->cdata.key_dma = ctx->key_dma;
733 	}
734 
735 	flc = &ctx->flc[DECRYPT];
736 	desc = flc->sh_desc;
737 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
738 				  true);
739 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
740 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
741 				   sizeof(flc->flc) + desc_bytes(desc),
742 				   ctx->dir);
743 
744 	return 0;
745 }
746 
747 static int rfc4106_setauthsize(struct crypto_aead *authenc,
748 			       unsigned int authsize)
749 {
750 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
751 
752 	ctx->authsize = authsize;
753 	rfc4106_set_sh_desc(authenc);
754 
755 	return 0;
756 }
757 
758 static int rfc4106_setkey(struct crypto_aead *aead,
759 			  const u8 *key, unsigned int keylen)
760 {
761 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
762 	struct device *dev = ctx->dev;
763 
764 	if (keylen < 4)
765 		return -EINVAL;
766 
767 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
768 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
769 
770 	memcpy(ctx->key, key, keylen);
771 	/*
772 	 * The last four bytes of the key material are used as the salt value
773 	 * in the nonce. Update the AES key length.
774 	 */
775 	ctx->cdata.keylen = keylen - 4;
776 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
777 				   ctx->dir);
778 
779 	return rfc4106_set_sh_desc(aead);
780 }
781 
782 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
783 {
784 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
785 	struct device *dev = ctx->dev;
786 	unsigned int ivsize = crypto_aead_ivsize(aead);
787 	struct caam_flc *flc;
788 	u32 *desc;
789 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
790 			ctx->cdata.keylen;
791 
792 	if (!ctx->cdata.keylen || !ctx->authsize)
793 		return 0;
794 
795 	ctx->cdata.key_virt = ctx->key;
796 
797 	/*
798 	 * RFC4543 encrypt shared descriptor
799 	 * Job Descriptor and Shared Descriptor
800 	 * must fit into the 64-word Descriptor h/w Buffer
801 	 */
802 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
803 		ctx->cdata.key_inline = true;
804 	} else {
805 		ctx->cdata.key_inline = false;
806 		ctx->cdata.key_dma = ctx->key_dma;
807 	}
808 
809 	flc = &ctx->flc[ENCRYPT];
810 	desc = flc->sh_desc;
811 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
812 				  true);
813 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
814 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
815 				   sizeof(flc->flc) + desc_bytes(desc),
816 				   ctx->dir);
817 
818 	/*
819 	 * Job Descriptor and Shared Descriptors
820 	 * must all fit into the 64-word Descriptor h/w Buffer
821 	 */
822 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
823 		ctx->cdata.key_inline = true;
824 	} else {
825 		ctx->cdata.key_inline = false;
826 		ctx->cdata.key_dma = ctx->key_dma;
827 	}
828 
829 	flc = &ctx->flc[DECRYPT];
830 	desc = flc->sh_desc;
831 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
832 				  true);
833 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
834 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
835 				   sizeof(flc->flc) + desc_bytes(desc),
836 				   ctx->dir);
837 
838 	return 0;
839 }
840 
841 static int rfc4543_setauthsize(struct crypto_aead *authenc,
842 			       unsigned int authsize)
843 {
844 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
845 
846 	ctx->authsize = authsize;
847 	rfc4543_set_sh_desc(authenc);
848 
849 	return 0;
850 }
851 
852 static int rfc4543_setkey(struct crypto_aead *aead,
853 			  const u8 *key, unsigned int keylen)
854 {
855 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
856 	struct device *dev = ctx->dev;
857 
858 	if (keylen < 4)
859 		return -EINVAL;
860 
861 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
862 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
863 
864 	memcpy(ctx->key, key, keylen);
865 	/*
866 	 * The last four bytes of the key material are used as the salt value
867 	 * in the nonce. Update the AES key length.
868 	 */
869 	ctx->cdata.keylen = keylen - 4;
870 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
871 				   ctx->dir);
872 
873 	return rfc4543_set_sh_desc(aead);
874 }
875 
876 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
877 			   unsigned int keylen)
878 {
879 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
880 	struct caam_skcipher_alg *alg =
881 		container_of(crypto_skcipher_alg(skcipher),
882 			     struct caam_skcipher_alg, skcipher);
883 	struct device *dev = ctx->dev;
884 	struct caam_flc *flc;
885 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
886 	u32 *desc;
887 	u32 ctx1_iv_off = 0;
888 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
889 			       OP_ALG_AAI_CTR_MOD128) &&
890 			       ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
891 			       OP_ALG_ALGSEL_CHACHA20);
892 	const bool is_rfc3686 = alg->caam.rfc3686;
893 
894 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
895 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
896 
897 	/*
898 	 * AES-CTR needs to load IV in CONTEXT1 reg
899 	 * at an offset of 128bits (16bytes)
900 	 * CONTEXT1[255:128] = IV
901 	 */
902 	if (ctr_mode)
903 		ctx1_iv_off = 16;
904 
905 	/*
906 	 * RFC3686 specific:
907 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
908 	 *	| *key = {KEY, NONCE}
909 	 */
910 	if (is_rfc3686) {
911 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
912 		keylen -= CTR_RFC3686_NONCE_SIZE;
913 	}
914 
915 	ctx->cdata.keylen = keylen;
916 	ctx->cdata.key_virt = key;
917 	ctx->cdata.key_inline = true;
918 
919 	/* skcipher_encrypt shared descriptor */
920 	flc = &ctx->flc[ENCRYPT];
921 	desc = flc->sh_desc;
922 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
923 				   ctx1_iv_off);
924 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
925 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
926 				   sizeof(flc->flc) + desc_bytes(desc),
927 				   ctx->dir);
928 
929 	/* skcipher_decrypt shared descriptor */
930 	flc = &ctx->flc[DECRYPT];
931 	desc = flc->sh_desc;
932 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
933 				   ctx1_iv_off);
934 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
935 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
936 				   sizeof(flc->flc) + desc_bytes(desc),
937 				   ctx->dir);
938 
939 	return 0;
940 }
941 
942 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
943 			       unsigned int keylen)
944 {
945 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
946 	struct device *dev = ctx->dev;
947 	struct caam_flc *flc;
948 	u32 *desc;
949 
950 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
951 		dev_err(dev, "key size mismatch\n");
952 		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
953 		return -EINVAL;
954 	}
955 
956 	ctx->cdata.keylen = keylen;
957 	ctx->cdata.key_virt = key;
958 	ctx->cdata.key_inline = true;
959 
960 	/* xts_skcipher_encrypt shared descriptor */
961 	flc = &ctx->flc[ENCRYPT];
962 	desc = flc->sh_desc;
963 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
964 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
965 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
966 				   sizeof(flc->flc) + desc_bytes(desc),
967 				   ctx->dir);
968 
969 	/* xts_skcipher_decrypt shared descriptor */
970 	flc = &ctx->flc[DECRYPT];
971 	desc = flc->sh_desc;
972 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
973 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
974 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
975 				   sizeof(flc->flc) + desc_bytes(desc),
976 				   ctx->dir);
977 
978 	return 0;
979 }
980 
981 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
982 {
983 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
984 	struct caam_request *req_ctx = skcipher_request_ctx(req);
985 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
986 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
987 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
988 	struct device *dev = ctx->dev;
989 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
990 		       GFP_KERNEL : GFP_ATOMIC;
991 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
992 	struct skcipher_edesc *edesc;
993 	dma_addr_t iv_dma;
994 	u8 *iv;
995 	int ivsize = crypto_skcipher_ivsize(skcipher);
996 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
997 	struct dpaa2_sg_entry *sg_table;
998 
999 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1000 	if (unlikely(src_nents < 0)) {
1001 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1002 			req->cryptlen);
1003 		return ERR_PTR(src_nents);
1004 	}
1005 
1006 	if (unlikely(req->dst != req->src)) {
1007 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1008 		if (unlikely(dst_nents < 0)) {
1009 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1010 				req->cryptlen);
1011 			return ERR_PTR(dst_nents);
1012 		}
1013 
1014 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1015 					      DMA_TO_DEVICE);
1016 		if (unlikely(!mapped_src_nents)) {
1017 			dev_err(dev, "unable to map source\n");
1018 			return ERR_PTR(-ENOMEM);
1019 		}
1020 
1021 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1022 					      DMA_FROM_DEVICE);
1023 		if (unlikely(!mapped_dst_nents)) {
1024 			dev_err(dev, "unable to map destination\n");
1025 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1026 			return ERR_PTR(-ENOMEM);
1027 		}
1028 	} else {
1029 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1030 					      DMA_BIDIRECTIONAL);
1031 		if (unlikely(!mapped_src_nents)) {
1032 			dev_err(dev, "unable to map source\n");
1033 			return ERR_PTR(-ENOMEM);
1034 		}
1035 	}
1036 
1037 	qm_sg_ents = 1 + mapped_src_nents;
1038 	dst_sg_idx = qm_sg_ents;
1039 
1040 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1041 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1042 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1043 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1044 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1045 			qm_sg_ents, ivsize);
1046 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1047 			   0, 0, 0);
1048 		return ERR_PTR(-ENOMEM);
1049 	}
1050 
1051 	/* allocate space for base edesc, link tables and IV */
1052 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1053 	if (unlikely(!edesc)) {
1054 		dev_err(dev, "could not allocate extended descriptor\n");
1055 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1056 			   0, 0, 0);
1057 		return ERR_PTR(-ENOMEM);
1058 	}
1059 
1060 	/* Make sure IV is located in a DMAable area */
1061 	sg_table = &edesc->sgt[0];
1062 	iv = (u8 *)(sg_table + qm_sg_ents);
1063 	memcpy(iv, req->iv, ivsize);
1064 
1065 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1066 	if (dma_mapping_error(dev, iv_dma)) {
1067 		dev_err(dev, "unable to map IV\n");
1068 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1069 			   0, 0, 0);
1070 		qi_cache_free(edesc);
1071 		return ERR_PTR(-ENOMEM);
1072 	}
1073 
1074 	edesc->src_nents = src_nents;
1075 	edesc->dst_nents = dst_nents;
1076 	edesc->iv_dma = iv_dma;
1077 	edesc->qm_sg_bytes = qm_sg_bytes;
1078 
1079 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1080 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1081 
1082 	if (mapped_dst_nents > 1)
1083 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1084 				 dst_sg_idx, 0);
1085 
1086 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1087 					  DMA_TO_DEVICE);
1088 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1089 		dev_err(dev, "unable to map S/G table\n");
1090 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1091 			   iv_dma, ivsize, 0, 0);
1092 		qi_cache_free(edesc);
1093 		return ERR_PTR(-ENOMEM);
1094 	}
1095 
1096 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1097 	dpaa2_fl_set_final(in_fle, true);
1098 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1099 	dpaa2_fl_set_len(out_fle, req->cryptlen);
1100 
1101 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1102 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1103 
1104 	if (req->src == req->dst) {
1105 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1106 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1107 				  sizeof(*sg_table));
1108 	} else if (mapped_dst_nents > 1) {
1109 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1110 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1111 				  sizeof(*sg_table));
1112 	} else {
1113 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1114 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1115 	}
1116 
1117 	return edesc;
1118 }
1119 
1120 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1121 		       struct aead_request *req)
1122 {
1123 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1124 	int ivsize = crypto_aead_ivsize(aead);
1125 
1126 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1127 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1128 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1129 }
1130 
1131 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1132 			   struct skcipher_request *req)
1133 {
1134 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1135 	int ivsize = crypto_skcipher_ivsize(skcipher);
1136 
1137 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1138 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1139 }
1140 
1141 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1142 {
1143 	struct crypto_async_request *areq = cbk_ctx;
1144 	struct aead_request *req = container_of(areq, struct aead_request,
1145 						base);
1146 	struct caam_request *req_ctx = to_caam_req(areq);
1147 	struct aead_edesc *edesc = req_ctx->edesc;
1148 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1149 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1150 	int ecode = 0;
1151 
1152 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1153 
1154 	if (unlikely(status)) {
1155 		caam_qi2_strstatus(ctx->dev, status);
1156 		ecode = -EIO;
1157 	}
1158 
1159 	aead_unmap(ctx->dev, edesc, req);
1160 	qi_cache_free(edesc);
1161 	aead_request_complete(req, ecode);
1162 }
1163 
1164 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1165 {
1166 	struct crypto_async_request *areq = cbk_ctx;
1167 	struct aead_request *req = container_of(areq, struct aead_request,
1168 						base);
1169 	struct caam_request *req_ctx = to_caam_req(areq);
1170 	struct aead_edesc *edesc = req_ctx->edesc;
1171 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1172 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1173 	int ecode = 0;
1174 
1175 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1176 
1177 	if (unlikely(status)) {
1178 		caam_qi2_strstatus(ctx->dev, status);
1179 		/*
1180 		 * verify hw auth check passed else return -EBADMSG
1181 		 */
1182 		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1183 		     JRSTA_CCBERR_ERRID_ICVCHK)
1184 			ecode = -EBADMSG;
1185 		else
1186 			ecode = -EIO;
1187 	}
1188 
1189 	aead_unmap(ctx->dev, edesc, req);
1190 	qi_cache_free(edesc);
1191 	aead_request_complete(req, ecode);
1192 }
1193 
1194 static int aead_encrypt(struct aead_request *req)
1195 {
1196 	struct aead_edesc *edesc;
1197 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1198 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1199 	struct caam_request *caam_req = aead_request_ctx(req);
1200 	int ret;
1201 
1202 	/* allocate extended descriptor */
1203 	edesc = aead_edesc_alloc(req, true);
1204 	if (IS_ERR(edesc))
1205 		return PTR_ERR(edesc);
1206 
1207 	caam_req->flc = &ctx->flc[ENCRYPT];
1208 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1209 	caam_req->cbk = aead_encrypt_done;
1210 	caam_req->ctx = &req->base;
1211 	caam_req->edesc = edesc;
1212 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1213 	if (ret != -EINPROGRESS &&
1214 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1215 		aead_unmap(ctx->dev, edesc, req);
1216 		qi_cache_free(edesc);
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 static int aead_decrypt(struct aead_request *req)
1223 {
1224 	struct aead_edesc *edesc;
1225 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1226 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1227 	struct caam_request *caam_req = aead_request_ctx(req);
1228 	int ret;
1229 
1230 	/* allocate extended descriptor */
1231 	edesc = aead_edesc_alloc(req, false);
1232 	if (IS_ERR(edesc))
1233 		return PTR_ERR(edesc);
1234 
1235 	caam_req->flc = &ctx->flc[DECRYPT];
1236 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1237 	caam_req->cbk = aead_decrypt_done;
1238 	caam_req->ctx = &req->base;
1239 	caam_req->edesc = edesc;
1240 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1241 	if (ret != -EINPROGRESS &&
1242 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1243 		aead_unmap(ctx->dev, edesc, req);
1244 		qi_cache_free(edesc);
1245 	}
1246 
1247 	return ret;
1248 }
1249 
1250 static int ipsec_gcm_encrypt(struct aead_request *req)
1251 {
1252 	if (req->assoclen < 8)
1253 		return -EINVAL;
1254 
1255 	return aead_encrypt(req);
1256 }
1257 
1258 static int ipsec_gcm_decrypt(struct aead_request *req)
1259 {
1260 	if (req->assoclen < 8)
1261 		return -EINVAL;
1262 
1263 	return aead_decrypt(req);
1264 }
1265 
1266 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1267 {
1268 	struct crypto_async_request *areq = cbk_ctx;
1269 	struct skcipher_request *req = skcipher_request_cast(areq);
1270 	struct caam_request *req_ctx = to_caam_req(areq);
1271 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1272 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1273 	struct skcipher_edesc *edesc = req_ctx->edesc;
1274 	int ecode = 0;
1275 	int ivsize = crypto_skcipher_ivsize(skcipher);
1276 
1277 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1278 
1279 	if (unlikely(status)) {
1280 		caam_qi2_strstatus(ctx->dev, status);
1281 		ecode = -EIO;
1282 	}
1283 
1284 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1285 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1286 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1287 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1288 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1289 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1290 
1291 	skcipher_unmap(ctx->dev, edesc, req);
1292 
1293 	/*
1294 	 * The crypto API expects us to set the IV (req->iv) to the last
1295 	 * ciphertext block. This is used e.g. by the CTS mode.
1296 	 */
1297 	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
1298 				 ivsize, 0);
1299 
1300 	qi_cache_free(edesc);
1301 	skcipher_request_complete(req, ecode);
1302 }
1303 
1304 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1305 {
1306 	struct crypto_async_request *areq = cbk_ctx;
1307 	struct skcipher_request *req = skcipher_request_cast(areq);
1308 	struct caam_request *req_ctx = to_caam_req(areq);
1309 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1310 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1311 	struct skcipher_edesc *edesc = req_ctx->edesc;
1312 	int ecode = 0;
1313 	int ivsize = crypto_skcipher_ivsize(skcipher);
1314 
1315 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1316 
1317 	if (unlikely(status)) {
1318 		caam_qi2_strstatus(ctx->dev, status);
1319 		ecode = -EIO;
1320 	}
1321 
1322 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1323 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1324 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1325 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1326 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1327 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1328 
1329 	skcipher_unmap(ctx->dev, edesc, req);
1330 	qi_cache_free(edesc);
1331 	skcipher_request_complete(req, ecode);
1332 }
1333 
1334 static int skcipher_encrypt(struct skcipher_request *req)
1335 {
1336 	struct skcipher_edesc *edesc;
1337 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1338 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1339 	struct caam_request *caam_req = skcipher_request_ctx(req);
1340 	int ret;
1341 
1342 	/* allocate extended descriptor */
1343 	edesc = skcipher_edesc_alloc(req);
1344 	if (IS_ERR(edesc))
1345 		return PTR_ERR(edesc);
1346 
1347 	caam_req->flc = &ctx->flc[ENCRYPT];
1348 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1349 	caam_req->cbk = skcipher_encrypt_done;
1350 	caam_req->ctx = &req->base;
1351 	caam_req->edesc = edesc;
1352 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1353 	if (ret != -EINPROGRESS &&
1354 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1355 		skcipher_unmap(ctx->dev, edesc, req);
1356 		qi_cache_free(edesc);
1357 	}
1358 
1359 	return ret;
1360 }
1361 
1362 static int skcipher_decrypt(struct skcipher_request *req)
1363 {
1364 	struct skcipher_edesc *edesc;
1365 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1366 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1367 	struct caam_request *caam_req = skcipher_request_ctx(req);
1368 	int ivsize = crypto_skcipher_ivsize(skcipher);
1369 	int ret;
1370 
1371 	/* allocate extended descriptor */
1372 	edesc = skcipher_edesc_alloc(req);
1373 	if (IS_ERR(edesc))
1374 		return PTR_ERR(edesc);
1375 
1376 	/*
1377 	 * The crypto API expects us to set the IV (req->iv) to the last
1378 	 * ciphertext block.
1379 	 */
1380 	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1381 				 ivsize, 0);
1382 
1383 	caam_req->flc = &ctx->flc[DECRYPT];
1384 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1385 	caam_req->cbk = skcipher_decrypt_done;
1386 	caam_req->ctx = &req->base;
1387 	caam_req->edesc = edesc;
1388 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1389 	if (ret != -EINPROGRESS &&
1390 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1391 		skcipher_unmap(ctx->dev, edesc, req);
1392 		qi_cache_free(edesc);
1393 	}
1394 
1395 	return ret;
1396 }
1397 
1398 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1399 			 bool uses_dkp)
1400 {
1401 	dma_addr_t dma_addr;
1402 	int i;
1403 
1404 	/* copy descriptor header template value */
1405 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1406 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1407 
1408 	ctx->dev = caam->dev;
1409 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1410 
1411 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1412 					offsetof(struct caam_ctx, flc_dma),
1413 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1414 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1415 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1416 		return -ENOMEM;
1417 	}
1418 
1419 	for (i = 0; i < NUM_OP; i++)
1420 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1421 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1422 
1423 	return 0;
1424 }
1425 
1426 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1427 {
1428 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1429 	struct caam_skcipher_alg *caam_alg =
1430 		container_of(alg, typeof(*caam_alg), skcipher);
1431 
1432 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1433 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1434 }
1435 
1436 static int caam_cra_init_aead(struct crypto_aead *tfm)
1437 {
1438 	struct aead_alg *alg = crypto_aead_alg(tfm);
1439 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1440 						      aead);
1441 
1442 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1443 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1444 			     alg->setkey == aead_setkey);
1445 }
1446 
1447 static void caam_exit_common(struct caam_ctx *ctx)
1448 {
1449 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1450 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1451 			       DMA_ATTR_SKIP_CPU_SYNC);
1452 }
1453 
1454 static void caam_cra_exit(struct crypto_skcipher *tfm)
1455 {
1456 	caam_exit_common(crypto_skcipher_ctx(tfm));
1457 }
1458 
1459 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1460 {
1461 	caam_exit_common(crypto_aead_ctx(tfm));
1462 }
1463 
1464 static struct caam_skcipher_alg driver_algs[] = {
1465 	{
1466 		.skcipher = {
1467 			.base = {
1468 				.cra_name = "cbc(aes)",
1469 				.cra_driver_name = "cbc-aes-caam-qi2",
1470 				.cra_blocksize = AES_BLOCK_SIZE,
1471 			},
1472 			.setkey = skcipher_setkey,
1473 			.encrypt = skcipher_encrypt,
1474 			.decrypt = skcipher_decrypt,
1475 			.min_keysize = AES_MIN_KEY_SIZE,
1476 			.max_keysize = AES_MAX_KEY_SIZE,
1477 			.ivsize = AES_BLOCK_SIZE,
1478 		},
1479 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1480 	},
1481 	{
1482 		.skcipher = {
1483 			.base = {
1484 				.cra_name = "cbc(des3_ede)",
1485 				.cra_driver_name = "cbc-3des-caam-qi2",
1486 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1487 			},
1488 			.setkey = skcipher_setkey,
1489 			.encrypt = skcipher_encrypt,
1490 			.decrypt = skcipher_decrypt,
1491 			.min_keysize = DES3_EDE_KEY_SIZE,
1492 			.max_keysize = DES3_EDE_KEY_SIZE,
1493 			.ivsize = DES3_EDE_BLOCK_SIZE,
1494 		},
1495 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1496 	},
1497 	{
1498 		.skcipher = {
1499 			.base = {
1500 				.cra_name = "cbc(des)",
1501 				.cra_driver_name = "cbc-des-caam-qi2",
1502 				.cra_blocksize = DES_BLOCK_SIZE,
1503 			},
1504 			.setkey = skcipher_setkey,
1505 			.encrypt = skcipher_encrypt,
1506 			.decrypt = skcipher_decrypt,
1507 			.min_keysize = DES_KEY_SIZE,
1508 			.max_keysize = DES_KEY_SIZE,
1509 			.ivsize = DES_BLOCK_SIZE,
1510 		},
1511 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1512 	},
1513 	{
1514 		.skcipher = {
1515 			.base = {
1516 				.cra_name = "ctr(aes)",
1517 				.cra_driver_name = "ctr-aes-caam-qi2",
1518 				.cra_blocksize = 1,
1519 			},
1520 			.setkey = skcipher_setkey,
1521 			.encrypt = skcipher_encrypt,
1522 			.decrypt = skcipher_decrypt,
1523 			.min_keysize = AES_MIN_KEY_SIZE,
1524 			.max_keysize = AES_MAX_KEY_SIZE,
1525 			.ivsize = AES_BLOCK_SIZE,
1526 			.chunksize = AES_BLOCK_SIZE,
1527 		},
1528 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1529 					OP_ALG_AAI_CTR_MOD128,
1530 	},
1531 	{
1532 		.skcipher = {
1533 			.base = {
1534 				.cra_name = "rfc3686(ctr(aes))",
1535 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1536 				.cra_blocksize = 1,
1537 			},
1538 			.setkey = skcipher_setkey,
1539 			.encrypt = skcipher_encrypt,
1540 			.decrypt = skcipher_decrypt,
1541 			.min_keysize = AES_MIN_KEY_SIZE +
1542 				       CTR_RFC3686_NONCE_SIZE,
1543 			.max_keysize = AES_MAX_KEY_SIZE +
1544 				       CTR_RFC3686_NONCE_SIZE,
1545 			.ivsize = CTR_RFC3686_IV_SIZE,
1546 			.chunksize = AES_BLOCK_SIZE,
1547 		},
1548 		.caam = {
1549 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1550 					   OP_ALG_AAI_CTR_MOD128,
1551 			.rfc3686 = true,
1552 		},
1553 	},
1554 	{
1555 		.skcipher = {
1556 			.base = {
1557 				.cra_name = "xts(aes)",
1558 				.cra_driver_name = "xts-aes-caam-qi2",
1559 				.cra_blocksize = AES_BLOCK_SIZE,
1560 			},
1561 			.setkey = xts_skcipher_setkey,
1562 			.encrypt = skcipher_encrypt,
1563 			.decrypt = skcipher_decrypt,
1564 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1565 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1566 			.ivsize = AES_BLOCK_SIZE,
1567 		},
1568 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1569 	},
1570 	{
1571 		.skcipher = {
1572 			.base = {
1573 				.cra_name = "chacha20",
1574 				.cra_driver_name = "chacha20-caam-qi2",
1575 				.cra_blocksize = 1,
1576 			},
1577 			.setkey = skcipher_setkey,
1578 			.encrypt = skcipher_encrypt,
1579 			.decrypt = skcipher_decrypt,
1580 			.min_keysize = CHACHA_KEY_SIZE,
1581 			.max_keysize = CHACHA_KEY_SIZE,
1582 			.ivsize = CHACHA_IV_SIZE,
1583 		},
1584 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1585 	},
1586 };
1587 
1588 static struct caam_aead_alg driver_aeads[] = {
1589 	{
1590 		.aead = {
1591 			.base = {
1592 				.cra_name = "rfc4106(gcm(aes))",
1593 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1594 				.cra_blocksize = 1,
1595 			},
1596 			.setkey = rfc4106_setkey,
1597 			.setauthsize = rfc4106_setauthsize,
1598 			.encrypt = ipsec_gcm_encrypt,
1599 			.decrypt = ipsec_gcm_decrypt,
1600 			.ivsize = 8,
1601 			.maxauthsize = AES_BLOCK_SIZE,
1602 		},
1603 		.caam = {
1604 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1605 		},
1606 	},
1607 	{
1608 		.aead = {
1609 			.base = {
1610 				.cra_name = "rfc4543(gcm(aes))",
1611 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1612 				.cra_blocksize = 1,
1613 			},
1614 			.setkey = rfc4543_setkey,
1615 			.setauthsize = rfc4543_setauthsize,
1616 			.encrypt = ipsec_gcm_encrypt,
1617 			.decrypt = ipsec_gcm_decrypt,
1618 			.ivsize = 8,
1619 			.maxauthsize = AES_BLOCK_SIZE,
1620 		},
1621 		.caam = {
1622 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1623 		},
1624 	},
1625 	/* Galois Counter Mode */
1626 	{
1627 		.aead = {
1628 			.base = {
1629 				.cra_name = "gcm(aes)",
1630 				.cra_driver_name = "gcm-aes-caam-qi2",
1631 				.cra_blocksize = 1,
1632 			},
1633 			.setkey = gcm_setkey,
1634 			.setauthsize = gcm_setauthsize,
1635 			.encrypt = aead_encrypt,
1636 			.decrypt = aead_decrypt,
1637 			.ivsize = 12,
1638 			.maxauthsize = AES_BLOCK_SIZE,
1639 		},
1640 		.caam = {
1641 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1642 		}
1643 	},
1644 	/* single-pass ipsec_esp descriptor */
1645 	{
1646 		.aead = {
1647 			.base = {
1648 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1649 				.cra_driver_name = "authenc-hmac-md5-"
1650 						   "cbc-aes-caam-qi2",
1651 				.cra_blocksize = AES_BLOCK_SIZE,
1652 			},
1653 			.setkey = aead_setkey,
1654 			.setauthsize = aead_setauthsize,
1655 			.encrypt = aead_encrypt,
1656 			.decrypt = aead_decrypt,
1657 			.ivsize = AES_BLOCK_SIZE,
1658 			.maxauthsize = MD5_DIGEST_SIZE,
1659 		},
1660 		.caam = {
1661 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1662 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1663 					   OP_ALG_AAI_HMAC_PRECOMP,
1664 		}
1665 	},
1666 	{
1667 		.aead = {
1668 			.base = {
1669 				.cra_name = "echainiv(authenc(hmac(md5),"
1670 					    "cbc(aes)))",
1671 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1672 						   "cbc-aes-caam-qi2",
1673 				.cra_blocksize = AES_BLOCK_SIZE,
1674 			},
1675 			.setkey = aead_setkey,
1676 			.setauthsize = aead_setauthsize,
1677 			.encrypt = aead_encrypt,
1678 			.decrypt = aead_decrypt,
1679 			.ivsize = AES_BLOCK_SIZE,
1680 			.maxauthsize = MD5_DIGEST_SIZE,
1681 		},
1682 		.caam = {
1683 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1684 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1685 					   OP_ALG_AAI_HMAC_PRECOMP,
1686 			.geniv = true,
1687 		}
1688 	},
1689 	{
1690 		.aead = {
1691 			.base = {
1692 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1693 				.cra_driver_name = "authenc-hmac-sha1-"
1694 						   "cbc-aes-caam-qi2",
1695 				.cra_blocksize = AES_BLOCK_SIZE,
1696 			},
1697 			.setkey = aead_setkey,
1698 			.setauthsize = aead_setauthsize,
1699 			.encrypt = aead_encrypt,
1700 			.decrypt = aead_decrypt,
1701 			.ivsize = AES_BLOCK_SIZE,
1702 			.maxauthsize = SHA1_DIGEST_SIZE,
1703 		},
1704 		.caam = {
1705 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1706 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1707 					   OP_ALG_AAI_HMAC_PRECOMP,
1708 		}
1709 	},
1710 	{
1711 		.aead = {
1712 			.base = {
1713 				.cra_name = "echainiv(authenc(hmac(sha1),"
1714 					    "cbc(aes)))",
1715 				.cra_driver_name = "echainiv-authenc-"
1716 						   "hmac-sha1-cbc-aes-caam-qi2",
1717 				.cra_blocksize = AES_BLOCK_SIZE,
1718 			},
1719 			.setkey = aead_setkey,
1720 			.setauthsize = aead_setauthsize,
1721 			.encrypt = aead_encrypt,
1722 			.decrypt = aead_decrypt,
1723 			.ivsize = AES_BLOCK_SIZE,
1724 			.maxauthsize = SHA1_DIGEST_SIZE,
1725 		},
1726 		.caam = {
1727 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1728 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1729 					   OP_ALG_AAI_HMAC_PRECOMP,
1730 			.geniv = true,
1731 		},
1732 	},
1733 	{
1734 		.aead = {
1735 			.base = {
1736 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1737 				.cra_driver_name = "authenc-hmac-sha224-"
1738 						   "cbc-aes-caam-qi2",
1739 				.cra_blocksize = AES_BLOCK_SIZE,
1740 			},
1741 			.setkey = aead_setkey,
1742 			.setauthsize = aead_setauthsize,
1743 			.encrypt = aead_encrypt,
1744 			.decrypt = aead_decrypt,
1745 			.ivsize = AES_BLOCK_SIZE,
1746 			.maxauthsize = SHA224_DIGEST_SIZE,
1747 		},
1748 		.caam = {
1749 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1750 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1751 					   OP_ALG_AAI_HMAC_PRECOMP,
1752 		}
1753 	},
1754 	{
1755 		.aead = {
1756 			.base = {
1757 				.cra_name = "echainiv(authenc(hmac(sha224),"
1758 					    "cbc(aes)))",
1759 				.cra_driver_name = "echainiv-authenc-"
1760 						   "hmac-sha224-cbc-aes-caam-qi2",
1761 				.cra_blocksize = AES_BLOCK_SIZE,
1762 			},
1763 			.setkey = aead_setkey,
1764 			.setauthsize = aead_setauthsize,
1765 			.encrypt = aead_encrypt,
1766 			.decrypt = aead_decrypt,
1767 			.ivsize = AES_BLOCK_SIZE,
1768 			.maxauthsize = SHA224_DIGEST_SIZE,
1769 		},
1770 		.caam = {
1771 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1772 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1773 					   OP_ALG_AAI_HMAC_PRECOMP,
1774 			.geniv = true,
1775 		}
1776 	},
1777 	{
1778 		.aead = {
1779 			.base = {
1780 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1781 				.cra_driver_name = "authenc-hmac-sha256-"
1782 						   "cbc-aes-caam-qi2",
1783 				.cra_blocksize = AES_BLOCK_SIZE,
1784 			},
1785 			.setkey = aead_setkey,
1786 			.setauthsize = aead_setauthsize,
1787 			.encrypt = aead_encrypt,
1788 			.decrypt = aead_decrypt,
1789 			.ivsize = AES_BLOCK_SIZE,
1790 			.maxauthsize = SHA256_DIGEST_SIZE,
1791 		},
1792 		.caam = {
1793 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1794 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1795 					   OP_ALG_AAI_HMAC_PRECOMP,
1796 		}
1797 	},
1798 	{
1799 		.aead = {
1800 			.base = {
1801 				.cra_name = "echainiv(authenc(hmac(sha256),"
1802 					    "cbc(aes)))",
1803 				.cra_driver_name = "echainiv-authenc-"
1804 						   "hmac-sha256-cbc-aes-"
1805 						   "caam-qi2",
1806 				.cra_blocksize = AES_BLOCK_SIZE,
1807 			},
1808 			.setkey = aead_setkey,
1809 			.setauthsize = aead_setauthsize,
1810 			.encrypt = aead_encrypt,
1811 			.decrypt = aead_decrypt,
1812 			.ivsize = AES_BLOCK_SIZE,
1813 			.maxauthsize = SHA256_DIGEST_SIZE,
1814 		},
1815 		.caam = {
1816 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1817 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1818 					   OP_ALG_AAI_HMAC_PRECOMP,
1819 			.geniv = true,
1820 		}
1821 	},
1822 	{
1823 		.aead = {
1824 			.base = {
1825 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1826 				.cra_driver_name = "authenc-hmac-sha384-"
1827 						   "cbc-aes-caam-qi2",
1828 				.cra_blocksize = AES_BLOCK_SIZE,
1829 			},
1830 			.setkey = aead_setkey,
1831 			.setauthsize = aead_setauthsize,
1832 			.encrypt = aead_encrypt,
1833 			.decrypt = aead_decrypt,
1834 			.ivsize = AES_BLOCK_SIZE,
1835 			.maxauthsize = SHA384_DIGEST_SIZE,
1836 		},
1837 		.caam = {
1838 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1839 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1840 					   OP_ALG_AAI_HMAC_PRECOMP,
1841 		}
1842 	},
1843 	{
1844 		.aead = {
1845 			.base = {
1846 				.cra_name = "echainiv(authenc(hmac(sha384),"
1847 					    "cbc(aes)))",
1848 				.cra_driver_name = "echainiv-authenc-"
1849 						   "hmac-sha384-cbc-aes-"
1850 						   "caam-qi2",
1851 				.cra_blocksize = AES_BLOCK_SIZE,
1852 			},
1853 			.setkey = aead_setkey,
1854 			.setauthsize = aead_setauthsize,
1855 			.encrypt = aead_encrypt,
1856 			.decrypt = aead_decrypt,
1857 			.ivsize = AES_BLOCK_SIZE,
1858 			.maxauthsize = SHA384_DIGEST_SIZE,
1859 		},
1860 		.caam = {
1861 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1862 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1863 					   OP_ALG_AAI_HMAC_PRECOMP,
1864 			.geniv = true,
1865 		}
1866 	},
1867 	{
1868 		.aead = {
1869 			.base = {
1870 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1871 				.cra_driver_name = "authenc-hmac-sha512-"
1872 						   "cbc-aes-caam-qi2",
1873 				.cra_blocksize = AES_BLOCK_SIZE,
1874 			},
1875 			.setkey = aead_setkey,
1876 			.setauthsize = aead_setauthsize,
1877 			.encrypt = aead_encrypt,
1878 			.decrypt = aead_decrypt,
1879 			.ivsize = AES_BLOCK_SIZE,
1880 			.maxauthsize = SHA512_DIGEST_SIZE,
1881 		},
1882 		.caam = {
1883 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1884 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1885 					   OP_ALG_AAI_HMAC_PRECOMP,
1886 		}
1887 	},
1888 	{
1889 		.aead = {
1890 			.base = {
1891 				.cra_name = "echainiv(authenc(hmac(sha512),"
1892 					    "cbc(aes)))",
1893 				.cra_driver_name = "echainiv-authenc-"
1894 						   "hmac-sha512-cbc-aes-"
1895 						   "caam-qi2",
1896 				.cra_blocksize = AES_BLOCK_SIZE,
1897 			},
1898 			.setkey = aead_setkey,
1899 			.setauthsize = aead_setauthsize,
1900 			.encrypt = aead_encrypt,
1901 			.decrypt = aead_decrypt,
1902 			.ivsize = AES_BLOCK_SIZE,
1903 			.maxauthsize = SHA512_DIGEST_SIZE,
1904 		},
1905 		.caam = {
1906 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1907 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1908 					   OP_ALG_AAI_HMAC_PRECOMP,
1909 			.geniv = true,
1910 		}
1911 	},
1912 	{
1913 		.aead = {
1914 			.base = {
1915 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1916 				.cra_driver_name = "authenc-hmac-md5-"
1917 						   "cbc-des3_ede-caam-qi2",
1918 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1919 			},
1920 			.setkey = aead_setkey,
1921 			.setauthsize = aead_setauthsize,
1922 			.encrypt = aead_encrypt,
1923 			.decrypt = aead_decrypt,
1924 			.ivsize = DES3_EDE_BLOCK_SIZE,
1925 			.maxauthsize = MD5_DIGEST_SIZE,
1926 		},
1927 		.caam = {
1928 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1929 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1930 					   OP_ALG_AAI_HMAC_PRECOMP,
1931 		}
1932 	},
1933 	{
1934 		.aead = {
1935 			.base = {
1936 				.cra_name = "echainiv(authenc(hmac(md5),"
1937 					    "cbc(des3_ede)))",
1938 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1939 						   "cbc-des3_ede-caam-qi2",
1940 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1941 			},
1942 			.setkey = aead_setkey,
1943 			.setauthsize = aead_setauthsize,
1944 			.encrypt = aead_encrypt,
1945 			.decrypt = aead_decrypt,
1946 			.ivsize = DES3_EDE_BLOCK_SIZE,
1947 			.maxauthsize = MD5_DIGEST_SIZE,
1948 		},
1949 		.caam = {
1950 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1951 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1952 					   OP_ALG_AAI_HMAC_PRECOMP,
1953 			.geniv = true,
1954 		}
1955 	},
1956 	{
1957 		.aead = {
1958 			.base = {
1959 				.cra_name = "authenc(hmac(sha1),"
1960 					    "cbc(des3_ede))",
1961 				.cra_driver_name = "authenc-hmac-sha1-"
1962 						   "cbc-des3_ede-caam-qi2",
1963 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1964 			},
1965 			.setkey = aead_setkey,
1966 			.setauthsize = aead_setauthsize,
1967 			.encrypt = aead_encrypt,
1968 			.decrypt = aead_decrypt,
1969 			.ivsize = DES3_EDE_BLOCK_SIZE,
1970 			.maxauthsize = SHA1_DIGEST_SIZE,
1971 		},
1972 		.caam = {
1973 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1974 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1975 					   OP_ALG_AAI_HMAC_PRECOMP,
1976 		},
1977 	},
1978 	{
1979 		.aead = {
1980 			.base = {
1981 				.cra_name = "echainiv(authenc(hmac(sha1),"
1982 					    "cbc(des3_ede)))",
1983 				.cra_driver_name = "echainiv-authenc-"
1984 						   "hmac-sha1-"
1985 						   "cbc-des3_ede-caam-qi2",
1986 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1987 			},
1988 			.setkey = aead_setkey,
1989 			.setauthsize = aead_setauthsize,
1990 			.encrypt = aead_encrypt,
1991 			.decrypt = aead_decrypt,
1992 			.ivsize = DES3_EDE_BLOCK_SIZE,
1993 			.maxauthsize = SHA1_DIGEST_SIZE,
1994 		},
1995 		.caam = {
1996 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1997 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1998 					   OP_ALG_AAI_HMAC_PRECOMP,
1999 			.geniv = true,
2000 		}
2001 	},
2002 	{
2003 		.aead = {
2004 			.base = {
2005 				.cra_name = "authenc(hmac(sha224),"
2006 					    "cbc(des3_ede))",
2007 				.cra_driver_name = "authenc-hmac-sha224-"
2008 						   "cbc-des3_ede-caam-qi2",
2009 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2010 			},
2011 			.setkey = aead_setkey,
2012 			.setauthsize = aead_setauthsize,
2013 			.encrypt = aead_encrypt,
2014 			.decrypt = aead_decrypt,
2015 			.ivsize = DES3_EDE_BLOCK_SIZE,
2016 			.maxauthsize = SHA224_DIGEST_SIZE,
2017 		},
2018 		.caam = {
2019 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2020 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2021 					   OP_ALG_AAI_HMAC_PRECOMP,
2022 		},
2023 	},
2024 	{
2025 		.aead = {
2026 			.base = {
2027 				.cra_name = "echainiv(authenc(hmac(sha224),"
2028 					    "cbc(des3_ede)))",
2029 				.cra_driver_name = "echainiv-authenc-"
2030 						   "hmac-sha224-"
2031 						   "cbc-des3_ede-caam-qi2",
2032 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2033 			},
2034 			.setkey = aead_setkey,
2035 			.setauthsize = aead_setauthsize,
2036 			.encrypt = aead_encrypt,
2037 			.decrypt = aead_decrypt,
2038 			.ivsize = DES3_EDE_BLOCK_SIZE,
2039 			.maxauthsize = SHA224_DIGEST_SIZE,
2040 		},
2041 		.caam = {
2042 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2043 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2044 					   OP_ALG_AAI_HMAC_PRECOMP,
2045 			.geniv = true,
2046 		}
2047 	},
2048 	{
2049 		.aead = {
2050 			.base = {
2051 				.cra_name = "authenc(hmac(sha256),"
2052 					    "cbc(des3_ede))",
2053 				.cra_driver_name = "authenc-hmac-sha256-"
2054 						   "cbc-des3_ede-caam-qi2",
2055 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2056 			},
2057 			.setkey = aead_setkey,
2058 			.setauthsize = aead_setauthsize,
2059 			.encrypt = aead_encrypt,
2060 			.decrypt = aead_decrypt,
2061 			.ivsize = DES3_EDE_BLOCK_SIZE,
2062 			.maxauthsize = SHA256_DIGEST_SIZE,
2063 		},
2064 		.caam = {
2065 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2066 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2067 					   OP_ALG_AAI_HMAC_PRECOMP,
2068 		},
2069 	},
2070 	{
2071 		.aead = {
2072 			.base = {
2073 				.cra_name = "echainiv(authenc(hmac(sha256),"
2074 					    "cbc(des3_ede)))",
2075 				.cra_driver_name = "echainiv-authenc-"
2076 						   "hmac-sha256-"
2077 						   "cbc-des3_ede-caam-qi2",
2078 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2079 			},
2080 			.setkey = aead_setkey,
2081 			.setauthsize = aead_setauthsize,
2082 			.encrypt = aead_encrypt,
2083 			.decrypt = aead_decrypt,
2084 			.ivsize = DES3_EDE_BLOCK_SIZE,
2085 			.maxauthsize = SHA256_DIGEST_SIZE,
2086 		},
2087 		.caam = {
2088 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2089 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2090 					   OP_ALG_AAI_HMAC_PRECOMP,
2091 			.geniv = true,
2092 		}
2093 	},
2094 	{
2095 		.aead = {
2096 			.base = {
2097 				.cra_name = "authenc(hmac(sha384),"
2098 					    "cbc(des3_ede))",
2099 				.cra_driver_name = "authenc-hmac-sha384-"
2100 						   "cbc-des3_ede-caam-qi2",
2101 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2102 			},
2103 			.setkey = aead_setkey,
2104 			.setauthsize = aead_setauthsize,
2105 			.encrypt = aead_encrypt,
2106 			.decrypt = aead_decrypt,
2107 			.ivsize = DES3_EDE_BLOCK_SIZE,
2108 			.maxauthsize = SHA384_DIGEST_SIZE,
2109 		},
2110 		.caam = {
2111 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2112 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2113 					   OP_ALG_AAI_HMAC_PRECOMP,
2114 		},
2115 	},
2116 	{
2117 		.aead = {
2118 			.base = {
2119 				.cra_name = "echainiv(authenc(hmac(sha384),"
2120 					    "cbc(des3_ede)))",
2121 				.cra_driver_name = "echainiv-authenc-"
2122 						   "hmac-sha384-"
2123 						   "cbc-des3_ede-caam-qi2",
2124 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2125 			},
2126 			.setkey = aead_setkey,
2127 			.setauthsize = aead_setauthsize,
2128 			.encrypt = aead_encrypt,
2129 			.decrypt = aead_decrypt,
2130 			.ivsize = DES3_EDE_BLOCK_SIZE,
2131 			.maxauthsize = SHA384_DIGEST_SIZE,
2132 		},
2133 		.caam = {
2134 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2136 					   OP_ALG_AAI_HMAC_PRECOMP,
2137 			.geniv = true,
2138 		}
2139 	},
2140 	{
2141 		.aead = {
2142 			.base = {
2143 				.cra_name = "authenc(hmac(sha512),"
2144 					    "cbc(des3_ede))",
2145 				.cra_driver_name = "authenc-hmac-sha512-"
2146 						   "cbc-des3_ede-caam-qi2",
2147 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2148 			},
2149 			.setkey = aead_setkey,
2150 			.setauthsize = aead_setauthsize,
2151 			.encrypt = aead_encrypt,
2152 			.decrypt = aead_decrypt,
2153 			.ivsize = DES3_EDE_BLOCK_SIZE,
2154 			.maxauthsize = SHA512_DIGEST_SIZE,
2155 		},
2156 		.caam = {
2157 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2158 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2159 					   OP_ALG_AAI_HMAC_PRECOMP,
2160 		},
2161 	},
2162 	{
2163 		.aead = {
2164 			.base = {
2165 				.cra_name = "echainiv(authenc(hmac(sha512),"
2166 					    "cbc(des3_ede)))",
2167 				.cra_driver_name = "echainiv-authenc-"
2168 						   "hmac-sha512-"
2169 						   "cbc-des3_ede-caam-qi2",
2170 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2171 			},
2172 			.setkey = aead_setkey,
2173 			.setauthsize = aead_setauthsize,
2174 			.encrypt = aead_encrypt,
2175 			.decrypt = aead_decrypt,
2176 			.ivsize = DES3_EDE_BLOCK_SIZE,
2177 			.maxauthsize = SHA512_DIGEST_SIZE,
2178 		},
2179 		.caam = {
2180 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2181 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2182 					   OP_ALG_AAI_HMAC_PRECOMP,
2183 			.geniv = true,
2184 		}
2185 	},
2186 	{
2187 		.aead = {
2188 			.base = {
2189 				.cra_name = "authenc(hmac(md5),cbc(des))",
2190 				.cra_driver_name = "authenc-hmac-md5-"
2191 						   "cbc-des-caam-qi2",
2192 				.cra_blocksize = DES_BLOCK_SIZE,
2193 			},
2194 			.setkey = aead_setkey,
2195 			.setauthsize = aead_setauthsize,
2196 			.encrypt = aead_encrypt,
2197 			.decrypt = aead_decrypt,
2198 			.ivsize = DES_BLOCK_SIZE,
2199 			.maxauthsize = MD5_DIGEST_SIZE,
2200 		},
2201 		.caam = {
2202 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2204 					   OP_ALG_AAI_HMAC_PRECOMP,
2205 		},
2206 	},
2207 	{
2208 		.aead = {
2209 			.base = {
2210 				.cra_name = "echainiv(authenc(hmac(md5),"
2211 					    "cbc(des)))",
2212 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2213 						   "cbc-des-caam-qi2",
2214 				.cra_blocksize = DES_BLOCK_SIZE,
2215 			},
2216 			.setkey = aead_setkey,
2217 			.setauthsize = aead_setauthsize,
2218 			.encrypt = aead_encrypt,
2219 			.decrypt = aead_decrypt,
2220 			.ivsize = DES_BLOCK_SIZE,
2221 			.maxauthsize = MD5_DIGEST_SIZE,
2222 		},
2223 		.caam = {
2224 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2225 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2226 					   OP_ALG_AAI_HMAC_PRECOMP,
2227 			.geniv = true,
2228 		}
2229 	},
2230 	{
2231 		.aead = {
2232 			.base = {
2233 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2234 				.cra_driver_name = "authenc-hmac-sha1-"
2235 						   "cbc-des-caam-qi2",
2236 				.cra_blocksize = DES_BLOCK_SIZE,
2237 			},
2238 			.setkey = aead_setkey,
2239 			.setauthsize = aead_setauthsize,
2240 			.encrypt = aead_encrypt,
2241 			.decrypt = aead_decrypt,
2242 			.ivsize = DES_BLOCK_SIZE,
2243 			.maxauthsize = SHA1_DIGEST_SIZE,
2244 		},
2245 		.caam = {
2246 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2247 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2248 					   OP_ALG_AAI_HMAC_PRECOMP,
2249 		},
2250 	},
2251 	{
2252 		.aead = {
2253 			.base = {
2254 				.cra_name = "echainiv(authenc(hmac(sha1),"
2255 					    "cbc(des)))",
2256 				.cra_driver_name = "echainiv-authenc-"
2257 						   "hmac-sha1-cbc-des-caam-qi2",
2258 				.cra_blocksize = DES_BLOCK_SIZE,
2259 			},
2260 			.setkey = aead_setkey,
2261 			.setauthsize = aead_setauthsize,
2262 			.encrypt = aead_encrypt,
2263 			.decrypt = aead_decrypt,
2264 			.ivsize = DES_BLOCK_SIZE,
2265 			.maxauthsize = SHA1_DIGEST_SIZE,
2266 		},
2267 		.caam = {
2268 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2269 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2270 					   OP_ALG_AAI_HMAC_PRECOMP,
2271 			.geniv = true,
2272 		}
2273 	},
2274 	{
2275 		.aead = {
2276 			.base = {
2277 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2278 				.cra_driver_name = "authenc-hmac-sha224-"
2279 						   "cbc-des-caam-qi2",
2280 				.cra_blocksize = DES_BLOCK_SIZE,
2281 			},
2282 			.setkey = aead_setkey,
2283 			.setauthsize = aead_setauthsize,
2284 			.encrypt = aead_encrypt,
2285 			.decrypt = aead_decrypt,
2286 			.ivsize = DES_BLOCK_SIZE,
2287 			.maxauthsize = SHA224_DIGEST_SIZE,
2288 		},
2289 		.caam = {
2290 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2291 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2292 					   OP_ALG_AAI_HMAC_PRECOMP,
2293 		},
2294 	},
2295 	{
2296 		.aead = {
2297 			.base = {
2298 				.cra_name = "echainiv(authenc(hmac(sha224),"
2299 					    "cbc(des)))",
2300 				.cra_driver_name = "echainiv-authenc-"
2301 						   "hmac-sha224-cbc-des-"
2302 						   "caam-qi2",
2303 				.cra_blocksize = DES_BLOCK_SIZE,
2304 			},
2305 			.setkey = aead_setkey,
2306 			.setauthsize = aead_setauthsize,
2307 			.encrypt = aead_encrypt,
2308 			.decrypt = aead_decrypt,
2309 			.ivsize = DES_BLOCK_SIZE,
2310 			.maxauthsize = SHA224_DIGEST_SIZE,
2311 		},
2312 		.caam = {
2313 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2314 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2315 					   OP_ALG_AAI_HMAC_PRECOMP,
2316 			.geniv = true,
2317 		}
2318 	},
2319 	{
2320 		.aead = {
2321 			.base = {
2322 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2323 				.cra_driver_name = "authenc-hmac-sha256-"
2324 						   "cbc-des-caam-qi2",
2325 				.cra_blocksize = DES_BLOCK_SIZE,
2326 			},
2327 			.setkey = aead_setkey,
2328 			.setauthsize = aead_setauthsize,
2329 			.encrypt = aead_encrypt,
2330 			.decrypt = aead_decrypt,
2331 			.ivsize = DES_BLOCK_SIZE,
2332 			.maxauthsize = SHA256_DIGEST_SIZE,
2333 		},
2334 		.caam = {
2335 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2336 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2337 					   OP_ALG_AAI_HMAC_PRECOMP,
2338 		},
2339 	},
2340 	{
2341 		.aead = {
2342 			.base = {
2343 				.cra_name = "echainiv(authenc(hmac(sha256),"
2344 					    "cbc(des)))",
2345 				.cra_driver_name = "echainiv-authenc-"
2346 						   "hmac-sha256-cbc-desi-"
2347 						   "caam-qi2",
2348 				.cra_blocksize = DES_BLOCK_SIZE,
2349 			},
2350 			.setkey = aead_setkey,
2351 			.setauthsize = aead_setauthsize,
2352 			.encrypt = aead_encrypt,
2353 			.decrypt = aead_decrypt,
2354 			.ivsize = DES_BLOCK_SIZE,
2355 			.maxauthsize = SHA256_DIGEST_SIZE,
2356 		},
2357 		.caam = {
2358 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2359 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2360 					   OP_ALG_AAI_HMAC_PRECOMP,
2361 			.geniv = true,
2362 		},
2363 	},
2364 	{
2365 		.aead = {
2366 			.base = {
2367 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2368 				.cra_driver_name = "authenc-hmac-sha384-"
2369 						   "cbc-des-caam-qi2",
2370 				.cra_blocksize = DES_BLOCK_SIZE,
2371 			},
2372 			.setkey = aead_setkey,
2373 			.setauthsize = aead_setauthsize,
2374 			.encrypt = aead_encrypt,
2375 			.decrypt = aead_decrypt,
2376 			.ivsize = DES_BLOCK_SIZE,
2377 			.maxauthsize = SHA384_DIGEST_SIZE,
2378 		},
2379 		.caam = {
2380 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2381 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2382 					   OP_ALG_AAI_HMAC_PRECOMP,
2383 		},
2384 	},
2385 	{
2386 		.aead = {
2387 			.base = {
2388 				.cra_name = "echainiv(authenc(hmac(sha384),"
2389 					    "cbc(des)))",
2390 				.cra_driver_name = "echainiv-authenc-"
2391 						   "hmac-sha384-cbc-des-"
2392 						   "caam-qi2",
2393 				.cra_blocksize = DES_BLOCK_SIZE,
2394 			},
2395 			.setkey = aead_setkey,
2396 			.setauthsize = aead_setauthsize,
2397 			.encrypt = aead_encrypt,
2398 			.decrypt = aead_decrypt,
2399 			.ivsize = DES_BLOCK_SIZE,
2400 			.maxauthsize = SHA384_DIGEST_SIZE,
2401 		},
2402 		.caam = {
2403 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2404 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2405 					   OP_ALG_AAI_HMAC_PRECOMP,
2406 			.geniv = true,
2407 		}
2408 	},
2409 	{
2410 		.aead = {
2411 			.base = {
2412 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2413 				.cra_driver_name = "authenc-hmac-sha512-"
2414 						   "cbc-des-caam-qi2",
2415 				.cra_blocksize = DES_BLOCK_SIZE,
2416 			},
2417 			.setkey = aead_setkey,
2418 			.setauthsize = aead_setauthsize,
2419 			.encrypt = aead_encrypt,
2420 			.decrypt = aead_decrypt,
2421 			.ivsize = DES_BLOCK_SIZE,
2422 			.maxauthsize = SHA512_DIGEST_SIZE,
2423 		},
2424 		.caam = {
2425 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2426 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2427 					   OP_ALG_AAI_HMAC_PRECOMP,
2428 		}
2429 	},
2430 	{
2431 		.aead = {
2432 			.base = {
2433 				.cra_name = "echainiv(authenc(hmac(sha512),"
2434 					    "cbc(des)))",
2435 				.cra_driver_name = "echainiv-authenc-"
2436 						   "hmac-sha512-cbc-des-"
2437 						   "caam-qi2",
2438 				.cra_blocksize = DES_BLOCK_SIZE,
2439 			},
2440 			.setkey = aead_setkey,
2441 			.setauthsize = aead_setauthsize,
2442 			.encrypt = aead_encrypt,
2443 			.decrypt = aead_decrypt,
2444 			.ivsize = DES_BLOCK_SIZE,
2445 			.maxauthsize = SHA512_DIGEST_SIZE,
2446 		},
2447 		.caam = {
2448 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2449 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2450 					   OP_ALG_AAI_HMAC_PRECOMP,
2451 			.geniv = true,
2452 		}
2453 	},
2454 	{
2455 		.aead = {
2456 			.base = {
2457 				.cra_name = "authenc(hmac(md5),"
2458 					    "rfc3686(ctr(aes)))",
2459 				.cra_driver_name = "authenc-hmac-md5-"
2460 						   "rfc3686-ctr-aes-caam-qi2",
2461 				.cra_blocksize = 1,
2462 			},
2463 			.setkey = aead_setkey,
2464 			.setauthsize = aead_setauthsize,
2465 			.encrypt = aead_encrypt,
2466 			.decrypt = aead_decrypt,
2467 			.ivsize = CTR_RFC3686_IV_SIZE,
2468 			.maxauthsize = MD5_DIGEST_SIZE,
2469 		},
2470 		.caam = {
2471 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2472 					   OP_ALG_AAI_CTR_MOD128,
2473 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2474 					   OP_ALG_AAI_HMAC_PRECOMP,
2475 			.rfc3686 = true,
2476 		},
2477 	},
2478 	{
2479 		.aead = {
2480 			.base = {
2481 				.cra_name = "seqiv(authenc("
2482 					    "hmac(md5),rfc3686(ctr(aes))))",
2483 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2484 						   "rfc3686-ctr-aes-caam-qi2",
2485 				.cra_blocksize = 1,
2486 			},
2487 			.setkey = aead_setkey,
2488 			.setauthsize = aead_setauthsize,
2489 			.encrypt = aead_encrypt,
2490 			.decrypt = aead_decrypt,
2491 			.ivsize = CTR_RFC3686_IV_SIZE,
2492 			.maxauthsize = MD5_DIGEST_SIZE,
2493 		},
2494 		.caam = {
2495 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2496 					   OP_ALG_AAI_CTR_MOD128,
2497 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2498 					   OP_ALG_AAI_HMAC_PRECOMP,
2499 			.rfc3686 = true,
2500 			.geniv = true,
2501 		},
2502 	},
2503 	{
2504 		.aead = {
2505 			.base = {
2506 				.cra_name = "authenc(hmac(sha1),"
2507 					    "rfc3686(ctr(aes)))",
2508 				.cra_driver_name = "authenc-hmac-sha1-"
2509 						   "rfc3686-ctr-aes-caam-qi2",
2510 				.cra_blocksize = 1,
2511 			},
2512 			.setkey = aead_setkey,
2513 			.setauthsize = aead_setauthsize,
2514 			.encrypt = aead_encrypt,
2515 			.decrypt = aead_decrypt,
2516 			.ivsize = CTR_RFC3686_IV_SIZE,
2517 			.maxauthsize = SHA1_DIGEST_SIZE,
2518 		},
2519 		.caam = {
2520 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2521 					   OP_ALG_AAI_CTR_MOD128,
2522 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2523 					   OP_ALG_AAI_HMAC_PRECOMP,
2524 			.rfc3686 = true,
2525 		},
2526 	},
2527 	{
2528 		.aead = {
2529 			.base = {
2530 				.cra_name = "seqiv(authenc("
2531 					    "hmac(sha1),rfc3686(ctr(aes))))",
2532 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2533 						   "rfc3686-ctr-aes-caam-qi2",
2534 				.cra_blocksize = 1,
2535 			},
2536 			.setkey = aead_setkey,
2537 			.setauthsize = aead_setauthsize,
2538 			.encrypt = aead_encrypt,
2539 			.decrypt = aead_decrypt,
2540 			.ivsize = CTR_RFC3686_IV_SIZE,
2541 			.maxauthsize = SHA1_DIGEST_SIZE,
2542 		},
2543 		.caam = {
2544 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2545 					   OP_ALG_AAI_CTR_MOD128,
2546 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2547 					   OP_ALG_AAI_HMAC_PRECOMP,
2548 			.rfc3686 = true,
2549 			.geniv = true,
2550 		},
2551 	},
2552 	{
2553 		.aead = {
2554 			.base = {
2555 				.cra_name = "authenc(hmac(sha224),"
2556 					    "rfc3686(ctr(aes)))",
2557 				.cra_driver_name = "authenc-hmac-sha224-"
2558 						   "rfc3686-ctr-aes-caam-qi2",
2559 				.cra_blocksize = 1,
2560 			},
2561 			.setkey = aead_setkey,
2562 			.setauthsize = aead_setauthsize,
2563 			.encrypt = aead_encrypt,
2564 			.decrypt = aead_decrypt,
2565 			.ivsize = CTR_RFC3686_IV_SIZE,
2566 			.maxauthsize = SHA224_DIGEST_SIZE,
2567 		},
2568 		.caam = {
2569 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2570 					   OP_ALG_AAI_CTR_MOD128,
2571 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2572 					   OP_ALG_AAI_HMAC_PRECOMP,
2573 			.rfc3686 = true,
2574 		},
2575 	},
2576 	{
2577 		.aead = {
2578 			.base = {
2579 				.cra_name = "seqiv(authenc("
2580 					    "hmac(sha224),rfc3686(ctr(aes))))",
2581 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2582 						   "rfc3686-ctr-aes-caam-qi2",
2583 				.cra_blocksize = 1,
2584 			},
2585 			.setkey = aead_setkey,
2586 			.setauthsize = aead_setauthsize,
2587 			.encrypt = aead_encrypt,
2588 			.decrypt = aead_decrypt,
2589 			.ivsize = CTR_RFC3686_IV_SIZE,
2590 			.maxauthsize = SHA224_DIGEST_SIZE,
2591 		},
2592 		.caam = {
2593 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2594 					   OP_ALG_AAI_CTR_MOD128,
2595 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2596 					   OP_ALG_AAI_HMAC_PRECOMP,
2597 			.rfc3686 = true,
2598 			.geniv = true,
2599 		},
2600 	},
2601 	{
2602 		.aead = {
2603 			.base = {
2604 				.cra_name = "authenc(hmac(sha256),"
2605 					    "rfc3686(ctr(aes)))",
2606 				.cra_driver_name = "authenc-hmac-sha256-"
2607 						   "rfc3686-ctr-aes-caam-qi2",
2608 				.cra_blocksize = 1,
2609 			},
2610 			.setkey = aead_setkey,
2611 			.setauthsize = aead_setauthsize,
2612 			.encrypt = aead_encrypt,
2613 			.decrypt = aead_decrypt,
2614 			.ivsize = CTR_RFC3686_IV_SIZE,
2615 			.maxauthsize = SHA256_DIGEST_SIZE,
2616 		},
2617 		.caam = {
2618 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2619 					   OP_ALG_AAI_CTR_MOD128,
2620 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2621 					   OP_ALG_AAI_HMAC_PRECOMP,
2622 			.rfc3686 = true,
2623 		},
2624 	},
2625 	{
2626 		.aead = {
2627 			.base = {
2628 				.cra_name = "seqiv(authenc(hmac(sha256),"
2629 					    "rfc3686(ctr(aes))))",
2630 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2631 						   "rfc3686-ctr-aes-caam-qi2",
2632 				.cra_blocksize = 1,
2633 			},
2634 			.setkey = aead_setkey,
2635 			.setauthsize = aead_setauthsize,
2636 			.encrypt = aead_encrypt,
2637 			.decrypt = aead_decrypt,
2638 			.ivsize = CTR_RFC3686_IV_SIZE,
2639 			.maxauthsize = SHA256_DIGEST_SIZE,
2640 		},
2641 		.caam = {
2642 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2643 					   OP_ALG_AAI_CTR_MOD128,
2644 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2645 					   OP_ALG_AAI_HMAC_PRECOMP,
2646 			.rfc3686 = true,
2647 			.geniv = true,
2648 		},
2649 	},
2650 	{
2651 		.aead = {
2652 			.base = {
2653 				.cra_name = "authenc(hmac(sha384),"
2654 					    "rfc3686(ctr(aes)))",
2655 				.cra_driver_name = "authenc-hmac-sha384-"
2656 						   "rfc3686-ctr-aes-caam-qi2",
2657 				.cra_blocksize = 1,
2658 			},
2659 			.setkey = aead_setkey,
2660 			.setauthsize = aead_setauthsize,
2661 			.encrypt = aead_encrypt,
2662 			.decrypt = aead_decrypt,
2663 			.ivsize = CTR_RFC3686_IV_SIZE,
2664 			.maxauthsize = SHA384_DIGEST_SIZE,
2665 		},
2666 		.caam = {
2667 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2668 					   OP_ALG_AAI_CTR_MOD128,
2669 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2670 					   OP_ALG_AAI_HMAC_PRECOMP,
2671 			.rfc3686 = true,
2672 		},
2673 	},
2674 	{
2675 		.aead = {
2676 			.base = {
2677 				.cra_name = "seqiv(authenc(hmac(sha384),"
2678 					    "rfc3686(ctr(aes))))",
2679 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2680 						   "rfc3686-ctr-aes-caam-qi2",
2681 				.cra_blocksize = 1,
2682 			},
2683 			.setkey = aead_setkey,
2684 			.setauthsize = aead_setauthsize,
2685 			.encrypt = aead_encrypt,
2686 			.decrypt = aead_decrypt,
2687 			.ivsize = CTR_RFC3686_IV_SIZE,
2688 			.maxauthsize = SHA384_DIGEST_SIZE,
2689 		},
2690 		.caam = {
2691 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2692 					   OP_ALG_AAI_CTR_MOD128,
2693 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2694 					   OP_ALG_AAI_HMAC_PRECOMP,
2695 			.rfc3686 = true,
2696 			.geniv = true,
2697 		},
2698 	},
2699 	{
2700 		.aead = {
2701 			.base = {
2702 				.cra_name = "rfc7539(chacha20,poly1305)",
2703 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2704 						   "caam-qi2",
2705 				.cra_blocksize = 1,
2706 			},
2707 			.setkey = chachapoly_setkey,
2708 			.setauthsize = chachapoly_setauthsize,
2709 			.encrypt = aead_encrypt,
2710 			.decrypt = aead_decrypt,
2711 			.ivsize = CHACHAPOLY_IV_SIZE,
2712 			.maxauthsize = POLY1305_DIGEST_SIZE,
2713 		},
2714 		.caam = {
2715 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2716 					   OP_ALG_AAI_AEAD,
2717 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2718 					   OP_ALG_AAI_AEAD,
2719 		},
2720 	},
2721 	{
2722 		.aead = {
2723 			.base = {
2724 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2725 				.cra_driver_name = "rfc7539esp-chacha20-"
2726 						   "poly1305-caam-qi2",
2727 				.cra_blocksize = 1,
2728 			},
2729 			.setkey = chachapoly_setkey,
2730 			.setauthsize = chachapoly_setauthsize,
2731 			.encrypt = aead_encrypt,
2732 			.decrypt = aead_decrypt,
2733 			.ivsize = 8,
2734 			.maxauthsize = POLY1305_DIGEST_SIZE,
2735 		},
2736 		.caam = {
2737 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2738 					   OP_ALG_AAI_AEAD,
2739 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2740 					   OP_ALG_AAI_AEAD,
2741 		},
2742 	},
2743 	{
2744 		.aead = {
2745 			.base = {
2746 				.cra_name = "authenc(hmac(sha512),"
2747 					    "rfc3686(ctr(aes)))",
2748 				.cra_driver_name = "authenc-hmac-sha512-"
2749 						   "rfc3686-ctr-aes-caam-qi2",
2750 				.cra_blocksize = 1,
2751 			},
2752 			.setkey = aead_setkey,
2753 			.setauthsize = aead_setauthsize,
2754 			.encrypt = aead_encrypt,
2755 			.decrypt = aead_decrypt,
2756 			.ivsize = CTR_RFC3686_IV_SIZE,
2757 			.maxauthsize = SHA512_DIGEST_SIZE,
2758 		},
2759 		.caam = {
2760 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2761 					   OP_ALG_AAI_CTR_MOD128,
2762 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2763 					   OP_ALG_AAI_HMAC_PRECOMP,
2764 			.rfc3686 = true,
2765 		},
2766 	},
2767 	{
2768 		.aead = {
2769 			.base = {
2770 				.cra_name = "seqiv(authenc(hmac(sha512),"
2771 					    "rfc3686(ctr(aes))))",
2772 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2773 						   "rfc3686-ctr-aes-caam-qi2",
2774 				.cra_blocksize = 1,
2775 			},
2776 			.setkey = aead_setkey,
2777 			.setauthsize = aead_setauthsize,
2778 			.encrypt = aead_encrypt,
2779 			.decrypt = aead_decrypt,
2780 			.ivsize = CTR_RFC3686_IV_SIZE,
2781 			.maxauthsize = SHA512_DIGEST_SIZE,
2782 		},
2783 		.caam = {
2784 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2785 					   OP_ALG_AAI_CTR_MOD128,
2786 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2787 					   OP_ALG_AAI_HMAC_PRECOMP,
2788 			.rfc3686 = true,
2789 			.geniv = true,
2790 		},
2791 	},
2792 };
2793 
2794 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2795 {
2796 	struct skcipher_alg *alg = &t_alg->skcipher;
2797 
2798 	alg->base.cra_module = THIS_MODULE;
2799 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2800 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2801 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2802 
2803 	alg->init = caam_cra_init_skcipher;
2804 	alg->exit = caam_cra_exit;
2805 }
2806 
2807 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2808 {
2809 	struct aead_alg *alg = &t_alg->aead;
2810 
2811 	alg->base.cra_module = THIS_MODULE;
2812 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2813 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2814 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2815 
2816 	alg->init = caam_cra_init_aead;
2817 	alg->exit = caam_cra_exit_aead;
2818 }
2819 
2820 /* max hash key is max split key size */
2821 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2822 
2823 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2824 
2825 /* caam context sizes for hashes: running digest + 8 */
2826 #define HASH_MSG_LEN			8
2827 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2828 
2829 enum hash_optype {
2830 	UPDATE = 0,
2831 	UPDATE_FIRST,
2832 	FINALIZE,
2833 	DIGEST,
2834 	HASH_NUM_OP
2835 };
2836 
2837 /**
2838  * caam_hash_ctx - ahash per-session context
2839  * @flc: Flow Contexts array
2840  * @flc_dma: I/O virtual addresses of the Flow Contexts
2841  * @dev: dpseci device
2842  * @ctx_len: size of Context Register
2843  * @adata: hashing algorithm details
2844  */
2845 struct caam_hash_ctx {
2846 	struct caam_flc flc[HASH_NUM_OP];
2847 	dma_addr_t flc_dma[HASH_NUM_OP];
2848 	struct device *dev;
2849 	int ctx_len;
2850 	struct alginfo adata;
2851 };
2852 
2853 /* ahash state */
2854 struct caam_hash_state {
2855 	struct caam_request caam_req;
2856 	dma_addr_t buf_dma;
2857 	dma_addr_t ctx_dma;
2858 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2859 	int buflen_0;
2860 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2861 	int buflen_1;
2862 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2863 	int (*update)(struct ahash_request *req);
2864 	int (*final)(struct ahash_request *req);
2865 	int (*finup)(struct ahash_request *req);
2866 	int current_buf;
2867 };
2868 
2869 struct caam_export_state {
2870 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2871 	u8 caam_ctx[MAX_CTX_LEN];
2872 	int buflen;
2873 	int (*update)(struct ahash_request *req);
2874 	int (*final)(struct ahash_request *req);
2875 	int (*finup)(struct ahash_request *req);
2876 };
2877 
2878 static inline void switch_buf(struct caam_hash_state *state)
2879 {
2880 	state->current_buf ^= 1;
2881 }
2882 
2883 static inline u8 *current_buf(struct caam_hash_state *state)
2884 {
2885 	return state->current_buf ? state->buf_1 : state->buf_0;
2886 }
2887 
2888 static inline u8 *alt_buf(struct caam_hash_state *state)
2889 {
2890 	return state->current_buf ? state->buf_0 : state->buf_1;
2891 }
2892 
2893 static inline int *current_buflen(struct caam_hash_state *state)
2894 {
2895 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2896 }
2897 
2898 static inline int *alt_buflen(struct caam_hash_state *state)
2899 {
2900 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2901 }
2902 
2903 /* Map current buffer in state (if length > 0) and put it in link table */
2904 static inline int buf_map_to_qm_sg(struct device *dev,
2905 				   struct dpaa2_sg_entry *qm_sg,
2906 				   struct caam_hash_state *state)
2907 {
2908 	int buflen = *current_buflen(state);
2909 
2910 	if (!buflen)
2911 		return 0;
2912 
2913 	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2914 					DMA_TO_DEVICE);
2915 	if (dma_mapping_error(dev, state->buf_dma)) {
2916 		dev_err(dev, "unable to map buf\n");
2917 		state->buf_dma = 0;
2918 		return -ENOMEM;
2919 	}
2920 
2921 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2922 
2923 	return 0;
2924 }
2925 
2926 /* Map state->caam_ctx, and add it to link table */
2927 static inline int ctx_map_to_qm_sg(struct device *dev,
2928 				   struct caam_hash_state *state, int ctx_len,
2929 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
2930 {
2931 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2932 	if (dma_mapping_error(dev, state->ctx_dma)) {
2933 		dev_err(dev, "unable to map ctx\n");
2934 		state->ctx_dma = 0;
2935 		return -ENOMEM;
2936 	}
2937 
2938 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2939 
2940 	return 0;
2941 }
2942 
2943 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2944 {
2945 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2946 	int digestsize = crypto_ahash_digestsize(ahash);
2947 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2948 	struct caam_flc *flc;
2949 	u32 *desc;
2950 
2951 	/* ahash_update shared descriptor */
2952 	flc = &ctx->flc[UPDATE];
2953 	desc = flc->sh_desc;
2954 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2955 			  ctx->ctx_len, true, priv->sec_attr.era);
2956 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2957 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2958 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2959 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
2960 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2961 			     1);
2962 
2963 	/* ahash_update_first shared descriptor */
2964 	flc = &ctx->flc[UPDATE_FIRST];
2965 	desc = flc->sh_desc;
2966 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
2967 			  ctx->ctx_len, false, priv->sec_attr.era);
2968 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2969 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
2970 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2971 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
2972 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2973 			     1);
2974 
2975 	/* ahash_final shared descriptor */
2976 	flc = &ctx->flc[FINALIZE];
2977 	desc = flc->sh_desc;
2978 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
2979 			  ctx->ctx_len, true, priv->sec_attr.era);
2980 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2981 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
2982 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2983 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
2984 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2985 			     1);
2986 
2987 	/* ahash_digest shared descriptor */
2988 	flc = &ctx->flc[DIGEST];
2989 	desc = flc->sh_desc;
2990 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
2991 			  ctx->ctx_len, false, priv->sec_attr.era);
2992 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2993 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
2994 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2995 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
2996 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2997 			     1);
2998 
2999 	return 0;
3000 }
3001 
3002 struct split_key_sh_result {
3003 	struct completion completion;
3004 	int err;
3005 	struct device *dev;
3006 };
3007 
3008 static void split_key_sh_done(void *cbk_ctx, u32 err)
3009 {
3010 	struct split_key_sh_result *res = cbk_ctx;
3011 
3012 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3013 
3014 	if (err)
3015 		caam_qi2_strstatus(res->dev, err);
3016 
3017 	res->err = err;
3018 	complete(&res->completion);
3019 }
3020 
3021 /* Digest hash size if it is too large */
3022 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
3023 			   u32 *keylen, u8 *key_out, u32 digestsize)
3024 {
3025 	struct caam_request *req_ctx;
3026 	u32 *desc;
3027 	struct split_key_sh_result result;
3028 	dma_addr_t src_dma, dst_dma;
3029 	struct caam_flc *flc;
3030 	dma_addr_t flc_dma;
3031 	int ret = -ENOMEM;
3032 	struct dpaa2_fl_entry *in_fle, *out_fle;
3033 
3034 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3035 	if (!req_ctx)
3036 		return -ENOMEM;
3037 
3038 	in_fle = &req_ctx->fd_flt[1];
3039 	out_fle = &req_ctx->fd_flt[0];
3040 
3041 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3042 	if (!flc)
3043 		goto err_flc;
3044 
3045 	src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
3046 				 DMA_TO_DEVICE);
3047 	if (dma_mapping_error(ctx->dev, src_dma)) {
3048 		dev_err(ctx->dev, "unable to map key input memory\n");
3049 		goto err_src_dma;
3050 	}
3051 	dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
3052 				 DMA_FROM_DEVICE);
3053 	if (dma_mapping_error(ctx->dev, dst_dma)) {
3054 		dev_err(ctx->dev, "unable to map key output memory\n");
3055 		goto err_dst_dma;
3056 	}
3057 
3058 	desc = flc->sh_desc;
3059 
3060 	init_sh_desc(desc, 0);
3061 
3062 	/* descriptor to perform unkeyed hash on key_in */
3063 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3064 			 OP_ALG_AS_INITFINAL);
3065 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3066 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3067 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3068 			 LDST_SRCDST_BYTE_CONTEXT);
3069 
3070 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3071 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3072 				 desc_bytes(desc), DMA_TO_DEVICE);
3073 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3074 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3075 		goto err_flc_dma;
3076 	}
3077 
3078 	dpaa2_fl_set_final(in_fle, true);
3079 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3080 	dpaa2_fl_set_addr(in_fle, src_dma);
3081 	dpaa2_fl_set_len(in_fle, *keylen);
3082 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3083 	dpaa2_fl_set_addr(out_fle, dst_dma);
3084 	dpaa2_fl_set_len(out_fle, digestsize);
3085 
3086 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3087 			     DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
3088 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3089 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3090 			     1);
3091 
3092 	result.err = 0;
3093 	init_completion(&result.completion);
3094 	result.dev = ctx->dev;
3095 
3096 	req_ctx->flc = flc;
3097 	req_ctx->flc_dma = flc_dma;
3098 	req_ctx->cbk = split_key_sh_done;
3099 	req_ctx->ctx = &result;
3100 
3101 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3102 	if (ret == -EINPROGRESS) {
3103 		/* in progress */
3104 		wait_for_completion(&result.completion);
3105 		ret = result.err;
3106 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3107 				     DUMP_PREFIX_ADDRESS, 16, 4, key_in,
3108 				     digestsize, 1);
3109 	}
3110 
3111 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3112 			 DMA_TO_DEVICE);
3113 err_flc_dma:
3114 	dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
3115 err_dst_dma:
3116 	dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
3117 err_src_dma:
3118 	kfree(flc);
3119 err_flc:
3120 	kfree(req_ctx);
3121 
3122 	*keylen = digestsize;
3123 
3124 	return ret;
3125 }
3126 
3127 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3128 			unsigned int keylen)
3129 {
3130 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3131 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3132 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3133 	int ret;
3134 	u8 *hashed_key = NULL;
3135 
3136 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3137 
3138 	if (keylen > blocksize) {
3139 		hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
3140 					   GFP_KERNEL | GFP_DMA);
3141 		if (!hashed_key)
3142 			return -ENOMEM;
3143 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
3144 				      digestsize);
3145 		if (ret)
3146 			goto bad_free_key;
3147 		key = hashed_key;
3148 	}
3149 
3150 	ctx->adata.keylen = keylen;
3151 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3152 					      OP_ALG_ALGSEL_MASK);
3153 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3154 		goto bad_free_key;
3155 
3156 	ctx->adata.key_virt = key;
3157 	ctx->adata.key_inline = true;
3158 
3159 	ret = ahash_set_sh_desc(ahash);
3160 	kfree(hashed_key);
3161 	return ret;
3162 bad_free_key:
3163 	kfree(hashed_key);
3164 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3165 	return -EINVAL;
3166 }
3167 
3168 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3169 			       struct ahash_request *req, int dst_len)
3170 {
3171 	struct caam_hash_state *state = ahash_request_ctx(req);
3172 
3173 	if (edesc->src_nents)
3174 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3175 	if (edesc->dst_dma)
3176 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
3177 
3178 	if (edesc->qm_sg_bytes)
3179 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3180 				 DMA_TO_DEVICE);
3181 
3182 	if (state->buf_dma) {
3183 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3184 				 DMA_TO_DEVICE);
3185 		state->buf_dma = 0;
3186 	}
3187 }
3188 
3189 static inline void ahash_unmap_ctx(struct device *dev,
3190 				   struct ahash_edesc *edesc,
3191 				   struct ahash_request *req, int dst_len,
3192 				   u32 flag)
3193 {
3194 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3195 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3196 	struct caam_hash_state *state = ahash_request_ctx(req);
3197 
3198 	if (state->ctx_dma) {
3199 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
3200 		state->ctx_dma = 0;
3201 	}
3202 	ahash_unmap(dev, edesc, req, dst_len);
3203 }
3204 
3205 static void ahash_done(void *cbk_ctx, u32 status)
3206 {
3207 	struct crypto_async_request *areq = cbk_ctx;
3208 	struct ahash_request *req = ahash_request_cast(areq);
3209 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3210 	struct caam_hash_state *state = ahash_request_ctx(req);
3211 	struct ahash_edesc *edesc = state->caam_req.edesc;
3212 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3213 	int digestsize = crypto_ahash_digestsize(ahash);
3214 	int ecode = 0;
3215 
3216 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3217 
3218 	if (unlikely(status)) {
3219 		caam_qi2_strstatus(ctx->dev, status);
3220 		ecode = -EIO;
3221 	}
3222 
3223 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3224 	qi_cache_free(edesc);
3225 
3226 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3227 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3228 			     ctx->ctx_len, 1);
3229 	if (req->result)
3230 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3231 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3232 				     digestsize, 1);
3233 
3234 	req->base.complete(&req->base, ecode);
3235 }
3236 
3237 static void ahash_done_bi(void *cbk_ctx, u32 status)
3238 {
3239 	struct crypto_async_request *areq = cbk_ctx;
3240 	struct ahash_request *req = ahash_request_cast(areq);
3241 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3242 	struct caam_hash_state *state = ahash_request_ctx(req);
3243 	struct ahash_edesc *edesc = state->caam_req.edesc;
3244 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3245 	int ecode = 0;
3246 
3247 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3248 
3249 	if (unlikely(status)) {
3250 		caam_qi2_strstatus(ctx->dev, status);
3251 		ecode = -EIO;
3252 	}
3253 
3254 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3255 	switch_buf(state);
3256 	qi_cache_free(edesc);
3257 
3258 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3259 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3260 			     ctx->ctx_len, 1);
3261 	if (req->result)
3262 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3263 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3264 				     crypto_ahash_digestsize(ahash), 1);
3265 
3266 	req->base.complete(&req->base, ecode);
3267 }
3268 
3269 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3270 {
3271 	struct crypto_async_request *areq = cbk_ctx;
3272 	struct ahash_request *req = ahash_request_cast(areq);
3273 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3274 	struct caam_hash_state *state = ahash_request_ctx(req);
3275 	struct ahash_edesc *edesc = state->caam_req.edesc;
3276 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3277 	int digestsize = crypto_ahash_digestsize(ahash);
3278 	int ecode = 0;
3279 
3280 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3281 
3282 	if (unlikely(status)) {
3283 		caam_qi2_strstatus(ctx->dev, status);
3284 		ecode = -EIO;
3285 	}
3286 
3287 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
3288 	qi_cache_free(edesc);
3289 
3290 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3291 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3292 			     ctx->ctx_len, 1);
3293 	if (req->result)
3294 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3295 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3296 				     digestsize, 1);
3297 
3298 	req->base.complete(&req->base, ecode);
3299 }
3300 
3301 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3302 {
3303 	struct crypto_async_request *areq = cbk_ctx;
3304 	struct ahash_request *req = ahash_request_cast(areq);
3305 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3306 	struct caam_hash_state *state = ahash_request_ctx(req);
3307 	struct ahash_edesc *edesc = state->caam_req.edesc;
3308 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3309 	int ecode = 0;
3310 
3311 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3312 
3313 	if (unlikely(status)) {
3314 		caam_qi2_strstatus(ctx->dev, status);
3315 		ecode = -EIO;
3316 	}
3317 
3318 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
3319 	switch_buf(state);
3320 	qi_cache_free(edesc);
3321 
3322 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3323 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3324 			     ctx->ctx_len, 1);
3325 	if (req->result)
3326 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3327 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3328 				     crypto_ahash_digestsize(ahash), 1);
3329 
3330 	req->base.complete(&req->base, ecode);
3331 }
3332 
3333 static int ahash_update_ctx(struct ahash_request *req)
3334 {
3335 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3336 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3337 	struct caam_hash_state *state = ahash_request_ctx(req);
3338 	struct caam_request *req_ctx = &state->caam_req;
3339 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3340 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3341 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3342 		      GFP_KERNEL : GFP_ATOMIC;
3343 	u8 *buf = current_buf(state);
3344 	int *buflen = current_buflen(state);
3345 	u8 *next_buf = alt_buf(state);
3346 	int *next_buflen = alt_buflen(state), last_buflen;
3347 	int in_len = *buflen + req->nbytes, to_hash;
3348 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3349 	struct ahash_edesc *edesc;
3350 	int ret = 0;
3351 
3352 	last_buflen = *next_buflen;
3353 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3354 	to_hash = in_len - *next_buflen;
3355 
3356 	if (to_hash) {
3357 		struct dpaa2_sg_entry *sg_table;
3358 
3359 		src_nents = sg_nents_for_len(req->src,
3360 					     req->nbytes - (*next_buflen));
3361 		if (src_nents < 0) {
3362 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3363 			return src_nents;
3364 		}
3365 
3366 		if (src_nents) {
3367 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3368 						  DMA_TO_DEVICE);
3369 			if (!mapped_nents) {
3370 				dev_err(ctx->dev, "unable to DMA map source\n");
3371 				return -ENOMEM;
3372 			}
3373 		} else {
3374 			mapped_nents = 0;
3375 		}
3376 
3377 		/* allocate space for base edesc and link tables */
3378 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3379 		if (!edesc) {
3380 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3381 				     DMA_TO_DEVICE);
3382 			return -ENOMEM;
3383 		}
3384 
3385 		edesc->src_nents = src_nents;
3386 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3387 		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3388 			      sizeof(*sg_table);
3389 		sg_table = &edesc->sgt[0];
3390 
3391 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3392 				       DMA_BIDIRECTIONAL);
3393 		if (ret)
3394 			goto unmap_ctx;
3395 
3396 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3397 		if (ret)
3398 			goto unmap_ctx;
3399 
3400 		if (mapped_nents) {
3401 			sg_to_qm_sg_last(req->src, mapped_nents,
3402 					 sg_table + qm_sg_src_index, 0);
3403 			if (*next_buflen)
3404 				scatterwalk_map_and_copy(next_buf, req->src,
3405 							 to_hash - *buflen,
3406 							 *next_buflen, 0);
3407 		} else {
3408 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3409 					   true);
3410 		}
3411 
3412 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3413 						  qm_sg_bytes, DMA_TO_DEVICE);
3414 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3415 			dev_err(ctx->dev, "unable to map S/G table\n");
3416 			ret = -ENOMEM;
3417 			goto unmap_ctx;
3418 		}
3419 		edesc->qm_sg_bytes = qm_sg_bytes;
3420 
3421 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3422 		dpaa2_fl_set_final(in_fle, true);
3423 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3424 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3425 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3426 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3427 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3428 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3429 
3430 		req_ctx->flc = &ctx->flc[UPDATE];
3431 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3432 		req_ctx->cbk = ahash_done_bi;
3433 		req_ctx->ctx = &req->base;
3434 		req_ctx->edesc = edesc;
3435 
3436 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3437 		if (ret != -EINPROGRESS &&
3438 		    !(ret == -EBUSY &&
3439 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3440 			goto unmap_ctx;
3441 	} else if (*next_buflen) {
3442 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3443 					 req->nbytes, 0);
3444 		*buflen = *next_buflen;
3445 		*next_buflen = last_buflen;
3446 	}
3447 
3448 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3449 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3450 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3451 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3452 			     1);
3453 
3454 	return ret;
3455 unmap_ctx:
3456 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3457 	qi_cache_free(edesc);
3458 	return ret;
3459 }
3460 
3461 static int ahash_final_ctx(struct ahash_request *req)
3462 {
3463 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3464 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3465 	struct caam_hash_state *state = ahash_request_ctx(req);
3466 	struct caam_request *req_ctx = &state->caam_req;
3467 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3468 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3469 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3470 		      GFP_KERNEL : GFP_ATOMIC;
3471 	int buflen = *current_buflen(state);
3472 	int qm_sg_bytes, qm_sg_src_index;
3473 	int digestsize = crypto_ahash_digestsize(ahash);
3474 	struct ahash_edesc *edesc;
3475 	struct dpaa2_sg_entry *sg_table;
3476 	int ret;
3477 
3478 	/* allocate space for base edesc and link tables */
3479 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3480 	if (!edesc)
3481 		return -ENOMEM;
3482 
3483 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3484 	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3485 	sg_table = &edesc->sgt[0];
3486 
3487 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3488 			       DMA_TO_DEVICE);
3489 	if (ret)
3490 		goto unmap_ctx;
3491 
3492 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3493 	if (ret)
3494 		goto unmap_ctx;
3495 
3496 	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3497 
3498 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3499 					  DMA_TO_DEVICE);
3500 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3501 		dev_err(ctx->dev, "unable to map S/G table\n");
3502 		ret = -ENOMEM;
3503 		goto unmap_ctx;
3504 	}
3505 	edesc->qm_sg_bytes = qm_sg_bytes;
3506 
3507 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3508 					DMA_FROM_DEVICE);
3509 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3510 		dev_err(ctx->dev, "unable to map dst\n");
3511 		edesc->dst_dma = 0;
3512 		ret = -ENOMEM;
3513 		goto unmap_ctx;
3514 	}
3515 
3516 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3517 	dpaa2_fl_set_final(in_fle, true);
3518 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3519 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3520 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3521 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3522 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3523 	dpaa2_fl_set_len(out_fle, digestsize);
3524 
3525 	req_ctx->flc = &ctx->flc[FINALIZE];
3526 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3527 	req_ctx->cbk = ahash_done_ctx_src;
3528 	req_ctx->ctx = &req->base;
3529 	req_ctx->edesc = edesc;
3530 
3531 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3532 	if (ret == -EINPROGRESS ||
3533 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3534 		return ret;
3535 
3536 unmap_ctx:
3537 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3538 	qi_cache_free(edesc);
3539 	return ret;
3540 }
3541 
3542 static int ahash_finup_ctx(struct ahash_request *req)
3543 {
3544 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3545 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3546 	struct caam_hash_state *state = ahash_request_ctx(req);
3547 	struct caam_request *req_ctx = &state->caam_req;
3548 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3549 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3550 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3551 		      GFP_KERNEL : GFP_ATOMIC;
3552 	int buflen = *current_buflen(state);
3553 	int qm_sg_bytes, qm_sg_src_index;
3554 	int src_nents, mapped_nents;
3555 	int digestsize = crypto_ahash_digestsize(ahash);
3556 	struct ahash_edesc *edesc;
3557 	struct dpaa2_sg_entry *sg_table;
3558 	int ret;
3559 
3560 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3561 	if (src_nents < 0) {
3562 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3563 		return src_nents;
3564 	}
3565 
3566 	if (src_nents) {
3567 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3568 					  DMA_TO_DEVICE);
3569 		if (!mapped_nents) {
3570 			dev_err(ctx->dev, "unable to DMA map source\n");
3571 			return -ENOMEM;
3572 		}
3573 	} else {
3574 		mapped_nents = 0;
3575 	}
3576 
3577 	/* allocate space for base edesc and link tables */
3578 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3579 	if (!edesc) {
3580 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3581 		return -ENOMEM;
3582 	}
3583 
3584 	edesc->src_nents = src_nents;
3585 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3586 	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3587 	sg_table = &edesc->sgt[0];
3588 
3589 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3590 			       DMA_TO_DEVICE);
3591 	if (ret)
3592 		goto unmap_ctx;
3593 
3594 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3595 	if (ret)
3596 		goto unmap_ctx;
3597 
3598 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3599 
3600 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3601 					  DMA_TO_DEVICE);
3602 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3603 		dev_err(ctx->dev, "unable to map S/G table\n");
3604 		ret = -ENOMEM;
3605 		goto unmap_ctx;
3606 	}
3607 	edesc->qm_sg_bytes = qm_sg_bytes;
3608 
3609 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3610 					DMA_FROM_DEVICE);
3611 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3612 		dev_err(ctx->dev, "unable to map dst\n");
3613 		edesc->dst_dma = 0;
3614 		ret = -ENOMEM;
3615 		goto unmap_ctx;
3616 	}
3617 
3618 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3619 	dpaa2_fl_set_final(in_fle, true);
3620 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3621 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3622 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3623 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3624 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3625 	dpaa2_fl_set_len(out_fle, digestsize);
3626 
3627 	req_ctx->flc = &ctx->flc[FINALIZE];
3628 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3629 	req_ctx->cbk = ahash_done_ctx_src;
3630 	req_ctx->ctx = &req->base;
3631 	req_ctx->edesc = edesc;
3632 
3633 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3634 	if (ret == -EINPROGRESS ||
3635 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3636 		return ret;
3637 
3638 unmap_ctx:
3639 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3640 	qi_cache_free(edesc);
3641 	return ret;
3642 }
3643 
3644 static int ahash_digest(struct ahash_request *req)
3645 {
3646 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3647 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3648 	struct caam_hash_state *state = ahash_request_ctx(req);
3649 	struct caam_request *req_ctx = &state->caam_req;
3650 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3651 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3652 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3653 		      GFP_KERNEL : GFP_ATOMIC;
3654 	int digestsize = crypto_ahash_digestsize(ahash);
3655 	int src_nents, mapped_nents;
3656 	struct ahash_edesc *edesc;
3657 	int ret = -ENOMEM;
3658 
3659 	state->buf_dma = 0;
3660 
3661 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3662 	if (src_nents < 0) {
3663 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3664 		return src_nents;
3665 	}
3666 
3667 	if (src_nents) {
3668 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3669 					  DMA_TO_DEVICE);
3670 		if (!mapped_nents) {
3671 			dev_err(ctx->dev, "unable to map source for DMA\n");
3672 			return ret;
3673 		}
3674 	} else {
3675 		mapped_nents = 0;
3676 	}
3677 
3678 	/* allocate space for base edesc and link tables */
3679 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3680 	if (!edesc) {
3681 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3682 		return ret;
3683 	}
3684 
3685 	edesc->src_nents = src_nents;
3686 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3687 
3688 	if (mapped_nents > 1) {
3689 		int qm_sg_bytes;
3690 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3691 
3692 		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3693 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3694 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3695 						  qm_sg_bytes, DMA_TO_DEVICE);
3696 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3697 			dev_err(ctx->dev, "unable to map S/G table\n");
3698 			goto unmap;
3699 		}
3700 		edesc->qm_sg_bytes = qm_sg_bytes;
3701 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3702 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3703 	} else {
3704 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3705 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3706 	}
3707 
3708 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3709 					DMA_FROM_DEVICE);
3710 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3711 		dev_err(ctx->dev, "unable to map dst\n");
3712 		edesc->dst_dma = 0;
3713 		goto unmap;
3714 	}
3715 
3716 	dpaa2_fl_set_final(in_fle, true);
3717 	dpaa2_fl_set_len(in_fle, req->nbytes);
3718 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3719 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3720 	dpaa2_fl_set_len(out_fle, digestsize);
3721 
3722 	req_ctx->flc = &ctx->flc[DIGEST];
3723 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3724 	req_ctx->cbk = ahash_done;
3725 	req_ctx->ctx = &req->base;
3726 	req_ctx->edesc = edesc;
3727 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3728 	if (ret == -EINPROGRESS ||
3729 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3730 		return ret;
3731 
3732 unmap:
3733 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3734 	qi_cache_free(edesc);
3735 	return ret;
3736 }
3737 
3738 static int ahash_final_no_ctx(struct ahash_request *req)
3739 {
3740 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3741 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3742 	struct caam_hash_state *state = ahash_request_ctx(req);
3743 	struct caam_request *req_ctx = &state->caam_req;
3744 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3745 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3746 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3747 		      GFP_KERNEL : GFP_ATOMIC;
3748 	u8 *buf = current_buf(state);
3749 	int buflen = *current_buflen(state);
3750 	int digestsize = crypto_ahash_digestsize(ahash);
3751 	struct ahash_edesc *edesc;
3752 	int ret = -ENOMEM;
3753 
3754 	/* allocate space for base edesc and link tables */
3755 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3756 	if (!edesc)
3757 		return ret;
3758 
3759 	state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
3760 	if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3761 		dev_err(ctx->dev, "unable to map src\n");
3762 		goto unmap;
3763 	}
3764 
3765 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3766 					DMA_FROM_DEVICE);
3767 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3768 		dev_err(ctx->dev, "unable to map dst\n");
3769 		edesc->dst_dma = 0;
3770 		goto unmap;
3771 	}
3772 
3773 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3774 	dpaa2_fl_set_final(in_fle, true);
3775 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3776 	dpaa2_fl_set_addr(in_fle, state->buf_dma);
3777 	dpaa2_fl_set_len(in_fle, buflen);
3778 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3779 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3780 	dpaa2_fl_set_len(out_fle, digestsize);
3781 
3782 	req_ctx->flc = &ctx->flc[DIGEST];
3783 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3784 	req_ctx->cbk = ahash_done;
3785 	req_ctx->ctx = &req->base;
3786 	req_ctx->edesc = edesc;
3787 
3788 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3789 	if (ret == -EINPROGRESS ||
3790 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3791 		return ret;
3792 
3793 unmap:
3794 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3795 	qi_cache_free(edesc);
3796 	return ret;
3797 }
3798 
3799 static int ahash_update_no_ctx(struct ahash_request *req)
3800 {
3801 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3802 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3803 	struct caam_hash_state *state = ahash_request_ctx(req);
3804 	struct caam_request *req_ctx = &state->caam_req;
3805 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3806 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3807 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3808 		      GFP_KERNEL : GFP_ATOMIC;
3809 	u8 *buf = current_buf(state);
3810 	int *buflen = current_buflen(state);
3811 	u8 *next_buf = alt_buf(state);
3812 	int *next_buflen = alt_buflen(state);
3813 	int in_len = *buflen + req->nbytes, to_hash;
3814 	int qm_sg_bytes, src_nents, mapped_nents;
3815 	struct ahash_edesc *edesc;
3816 	int ret = 0;
3817 
3818 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3819 	to_hash = in_len - *next_buflen;
3820 
3821 	if (to_hash) {
3822 		struct dpaa2_sg_entry *sg_table;
3823 
3824 		src_nents = sg_nents_for_len(req->src,
3825 					     req->nbytes - *next_buflen);
3826 		if (src_nents < 0) {
3827 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3828 			return src_nents;
3829 		}
3830 
3831 		if (src_nents) {
3832 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3833 						  DMA_TO_DEVICE);
3834 			if (!mapped_nents) {
3835 				dev_err(ctx->dev, "unable to DMA map source\n");
3836 				return -ENOMEM;
3837 			}
3838 		} else {
3839 			mapped_nents = 0;
3840 		}
3841 
3842 		/* allocate space for base edesc and link tables */
3843 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3844 		if (!edesc) {
3845 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3846 				     DMA_TO_DEVICE);
3847 			return -ENOMEM;
3848 		}
3849 
3850 		edesc->src_nents = src_nents;
3851 		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3852 		sg_table = &edesc->sgt[0];
3853 
3854 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3855 		if (ret)
3856 			goto unmap_ctx;
3857 
3858 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3859 
3860 		if (*next_buflen)
3861 			scatterwalk_map_and_copy(next_buf, req->src,
3862 						 to_hash - *buflen,
3863 						 *next_buflen, 0);
3864 
3865 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3866 						  qm_sg_bytes, DMA_TO_DEVICE);
3867 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3868 			dev_err(ctx->dev, "unable to map S/G table\n");
3869 			ret = -ENOMEM;
3870 			goto unmap_ctx;
3871 		}
3872 		edesc->qm_sg_bytes = qm_sg_bytes;
3873 
3874 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3875 						ctx->ctx_len, DMA_FROM_DEVICE);
3876 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3877 			dev_err(ctx->dev, "unable to map ctx\n");
3878 			state->ctx_dma = 0;
3879 			ret = -ENOMEM;
3880 			goto unmap_ctx;
3881 		}
3882 
3883 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3884 		dpaa2_fl_set_final(in_fle, true);
3885 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3886 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3887 		dpaa2_fl_set_len(in_fle, to_hash);
3888 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3889 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3890 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3891 
3892 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3893 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3894 		req_ctx->cbk = ahash_done_ctx_dst;
3895 		req_ctx->ctx = &req->base;
3896 		req_ctx->edesc = edesc;
3897 
3898 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3899 		if (ret != -EINPROGRESS &&
3900 		    !(ret == -EBUSY &&
3901 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3902 			goto unmap_ctx;
3903 
3904 		state->update = ahash_update_ctx;
3905 		state->finup = ahash_finup_ctx;
3906 		state->final = ahash_final_ctx;
3907 	} else if (*next_buflen) {
3908 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3909 					 req->nbytes, 0);
3910 		*buflen = *next_buflen;
3911 		*next_buflen = 0;
3912 	}
3913 
3914 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3915 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3916 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3917 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3918 			     1);
3919 
3920 	return ret;
3921 unmap_ctx:
3922 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
3923 	qi_cache_free(edesc);
3924 	return ret;
3925 }
3926 
3927 static int ahash_finup_no_ctx(struct ahash_request *req)
3928 {
3929 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3930 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3931 	struct caam_hash_state *state = ahash_request_ctx(req);
3932 	struct caam_request *req_ctx = &state->caam_req;
3933 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3934 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3935 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3936 		      GFP_KERNEL : GFP_ATOMIC;
3937 	int buflen = *current_buflen(state);
3938 	int qm_sg_bytes, src_nents, mapped_nents;
3939 	int digestsize = crypto_ahash_digestsize(ahash);
3940 	struct ahash_edesc *edesc;
3941 	struct dpaa2_sg_entry *sg_table;
3942 	int ret;
3943 
3944 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3945 	if (src_nents < 0) {
3946 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3947 		return src_nents;
3948 	}
3949 
3950 	if (src_nents) {
3951 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3952 					  DMA_TO_DEVICE);
3953 		if (!mapped_nents) {
3954 			dev_err(ctx->dev, "unable to DMA map source\n");
3955 			return -ENOMEM;
3956 		}
3957 	} else {
3958 		mapped_nents = 0;
3959 	}
3960 
3961 	/* allocate space for base edesc and link tables */
3962 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3963 	if (!edesc) {
3964 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3965 		return -ENOMEM;
3966 	}
3967 
3968 	edesc->src_nents = src_nents;
3969 	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3970 	sg_table = &edesc->sgt[0];
3971 
3972 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3973 	if (ret)
3974 		goto unmap;
3975 
3976 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3977 
3978 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3979 					  DMA_TO_DEVICE);
3980 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3981 		dev_err(ctx->dev, "unable to map S/G table\n");
3982 		ret = -ENOMEM;
3983 		goto unmap;
3984 	}
3985 	edesc->qm_sg_bytes = qm_sg_bytes;
3986 
3987 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3988 					DMA_FROM_DEVICE);
3989 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3990 		dev_err(ctx->dev, "unable to map dst\n");
3991 		edesc->dst_dma = 0;
3992 		ret = -ENOMEM;
3993 		goto unmap;
3994 	}
3995 
3996 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3997 	dpaa2_fl_set_final(in_fle, true);
3998 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3999 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4000 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4001 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4002 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
4003 	dpaa2_fl_set_len(out_fle, digestsize);
4004 
4005 	req_ctx->flc = &ctx->flc[DIGEST];
4006 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4007 	req_ctx->cbk = ahash_done;
4008 	req_ctx->ctx = &req->base;
4009 	req_ctx->edesc = edesc;
4010 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4011 	if (ret != -EINPROGRESS &&
4012 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4013 		goto unmap;
4014 
4015 	return ret;
4016 unmap:
4017 	ahash_unmap(ctx->dev, edesc, req, digestsize);
4018 	qi_cache_free(edesc);
4019 	return -ENOMEM;
4020 }
4021 
4022 static int ahash_update_first(struct ahash_request *req)
4023 {
4024 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4025 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4026 	struct caam_hash_state *state = ahash_request_ctx(req);
4027 	struct caam_request *req_ctx = &state->caam_req;
4028 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4029 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4030 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4031 		      GFP_KERNEL : GFP_ATOMIC;
4032 	u8 *next_buf = alt_buf(state);
4033 	int *next_buflen = alt_buflen(state);
4034 	int to_hash;
4035 	int src_nents, mapped_nents;
4036 	struct ahash_edesc *edesc;
4037 	int ret = 0;
4038 
4039 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4040 				      1);
4041 	to_hash = req->nbytes - *next_buflen;
4042 
4043 	if (to_hash) {
4044 		struct dpaa2_sg_entry *sg_table;
4045 
4046 		src_nents = sg_nents_for_len(req->src,
4047 					     req->nbytes - (*next_buflen));
4048 		if (src_nents < 0) {
4049 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4050 			return src_nents;
4051 		}
4052 
4053 		if (src_nents) {
4054 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4055 						  DMA_TO_DEVICE);
4056 			if (!mapped_nents) {
4057 				dev_err(ctx->dev, "unable to map source for DMA\n");
4058 				return -ENOMEM;
4059 			}
4060 		} else {
4061 			mapped_nents = 0;
4062 		}
4063 
4064 		/* allocate space for base edesc and link tables */
4065 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4066 		if (!edesc) {
4067 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4068 				     DMA_TO_DEVICE);
4069 			return -ENOMEM;
4070 		}
4071 
4072 		edesc->src_nents = src_nents;
4073 		sg_table = &edesc->sgt[0];
4074 
4075 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4076 		dpaa2_fl_set_final(in_fle, true);
4077 		dpaa2_fl_set_len(in_fle, to_hash);
4078 
4079 		if (mapped_nents > 1) {
4080 			int qm_sg_bytes;
4081 
4082 			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
4083 			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
4084 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4085 							  qm_sg_bytes,
4086 							  DMA_TO_DEVICE);
4087 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4088 				dev_err(ctx->dev, "unable to map S/G table\n");
4089 				ret = -ENOMEM;
4090 				goto unmap_ctx;
4091 			}
4092 			edesc->qm_sg_bytes = qm_sg_bytes;
4093 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4094 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4095 		} else {
4096 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4097 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4098 		}
4099 
4100 		if (*next_buflen)
4101 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4102 						 *next_buflen, 0);
4103 
4104 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4105 						ctx->ctx_len, DMA_FROM_DEVICE);
4106 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4107 			dev_err(ctx->dev, "unable to map ctx\n");
4108 			state->ctx_dma = 0;
4109 			ret = -ENOMEM;
4110 			goto unmap_ctx;
4111 		}
4112 
4113 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4114 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4115 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4116 
4117 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4118 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4119 		req_ctx->cbk = ahash_done_ctx_dst;
4120 		req_ctx->ctx = &req->base;
4121 		req_ctx->edesc = edesc;
4122 
4123 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4124 		if (ret != -EINPROGRESS &&
4125 		    !(ret == -EBUSY && req->base.flags &
4126 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4127 			goto unmap_ctx;
4128 
4129 		state->update = ahash_update_ctx;
4130 		state->finup = ahash_finup_ctx;
4131 		state->final = ahash_final_ctx;
4132 	} else if (*next_buflen) {
4133 		state->update = ahash_update_no_ctx;
4134 		state->finup = ahash_finup_no_ctx;
4135 		state->final = ahash_final_no_ctx;
4136 		scatterwalk_map_and_copy(next_buf, req->src, 0,
4137 					 req->nbytes, 0);
4138 		switch_buf(state);
4139 	}
4140 
4141 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4142 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4143 			     1);
4144 
4145 	return ret;
4146 unmap_ctx:
4147 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
4148 	qi_cache_free(edesc);
4149 	return ret;
4150 }
4151 
4152 static int ahash_finup_first(struct ahash_request *req)
4153 {
4154 	return ahash_digest(req);
4155 }
4156 
4157 static int ahash_init(struct ahash_request *req)
4158 {
4159 	struct caam_hash_state *state = ahash_request_ctx(req);
4160 
4161 	state->update = ahash_update_first;
4162 	state->finup = ahash_finup_first;
4163 	state->final = ahash_final_no_ctx;
4164 
4165 	state->ctx_dma = 0;
4166 	state->current_buf = 0;
4167 	state->buf_dma = 0;
4168 	state->buflen_0 = 0;
4169 	state->buflen_1 = 0;
4170 
4171 	return 0;
4172 }
4173 
4174 static int ahash_update(struct ahash_request *req)
4175 {
4176 	struct caam_hash_state *state = ahash_request_ctx(req);
4177 
4178 	return state->update(req);
4179 }
4180 
4181 static int ahash_finup(struct ahash_request *req)
4182 {
4183 	struct caam_hash_state *state = ahash_request_ctx(req);
4184 
4185 	return state->finup(req);
4186 }
4187 
4188 static int ahash_final(struct ahash_request *req)
4189 {
4190 	struct caam_hash_state *state = ahash_request_ctx(req);
4191 
4192 	return state->final(req);
4193 }
4194 
4195 static int ahash_export(struct ahash_request *req, void *out)
4196 {
4197 	struct caam_hash_state *state = ahash_request_ctx(req);
4198 	struct caam_export_state *export = out;
4199 	int len;
4200 	u8 *buf;
4201 
4202 	if (state->current_buf) {
4203 		buf = state->buf_1;
4204 		len = state->buflen_1;
4205 	} else {
4206 		buf = state->buf_0;
4207 		len = state->buflen_0;
4208 	}
4209 
4210 	memcpy(export->buf, buf, len);
4211 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4212 	export->buflen = len;
4213 	export->update = state->update;
4214 	export->final = state->final;
4215 	export->finup = state->finup;
4216 
4217 	return 0;
4218 }
4219 
4220 static int ahash_import(struct ahash_request *req, const void *in)
4221 {
4222 	struct caam_hash_state *state = ahash_request_ctx(req);
4223 	const struct caam_export_state *export = in;
4224 
4225 	memset(state, 0, sizeof(*state));
4226 	memcpy(state->buf_0, export->buf, export->buflen);
4227 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4228 	state->buflen_0 = export->buflen;
4229 	state->update = export->update;
4230 	state->final = export->final;
4231 	state->finup = export->finup;
4232 
4233 	return 0;
4234 }
4235 
4236 struct caam_hash_template {
4237 	char name[CRYPTO_MAX_ALG_NAME];
4238 	char driver_name[CRYPTO_MAX_ALG_NAME];
4239 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4240 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4241 	unsigned int blocksize;
4242 	struct ahash_alg template_ahash;
4243 	u32 alg_type;
4244 };
4245 
4246 /* ahash descriptors */
4247 static struct caam_hash_template driver_hash[] = {
4248 	{
4249 		.name = "sha1",
4250 		.driver_name = "sha1-caam-qi2",
4251 		.hmac_name = "hmac(sha1)",
4252 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4253 		.blocksize = SHA1_BLOCK_SIZE,
4254 		.template_ahash = {
4255 			.init = ahash_init,
4256 			.update = ahash_update,
4257 			.final = ahash_final,
4258 			.finup = ahash_finup,
4259 			.digest = ahash_digest,
4260 			.export = ahash_export,
4261 			.import = ahash_import,
4262 			.setkey = ahash_setkey,
4263 			.halg = {
4264 				.digestsize = SHA1_DIGEST_SIZE,
4265 				.statesize = sizeof(struct caam_export_state),
4266 			},
4267 		},
4268 		.alg_type = OP_ALG_ALGSEL_SHA1,
4269 	}, {
4270 		.name = "sha224",
4271 		.driver_name = "sha224-caam-qi2",
4272 		.hmac_name = "hmac(sha224)",
4273 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4274 		.blocksize = SHA224_BLOCK_SIZE,
4275 		.template_ahash = {
4276 			.init = ahash_init,
4277 			.update = ahash_update,
4278 			.final = ahash_final,
4279 			.finup = ahash_finup,
4280 			.digest = ahash_digest,
4281 			.export = ahash_export,
4282 			.import = ahash_import,
4283 			.setkey = ahash_setkey,
4284 			.halg = {
4285 				.digestsize = SHA224_DIGEST_SIZE,
4286 				.statesize = sizeof(struct caam_export_state),
4287 			},
4288 		},
4289 		.alg_type = OP_ALG_ALGSEL_SHA224,
4290 	}, {
4291 		.name = "sha256",
4292 		.driver_name = "sha256-caam-qi2",
4293 		.hmac_name = "hmac(sha256)",
4294 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4295 		.blocksize = SHA256_BLOCK_SIZE,
4296 		.template_ahash = {
4297 			.init = ahash_init,
4298 			.update = ahash_update,
4299 			.final = ahash_final,
4300 			.finup = ahash_finup,
4301 			.digest = ahash_digest,
4302 			.export = ahash_export,
4303 			.import = ahash_import,
4304 			.setkey = ahash_setkey,
4305 			.halg = {
4306 				.digestsize = SHA256_DIGEST_SIZE,
4307 				.statesize = sizeof(struct caam_export_state),
4308 			},
4309 		},
4310 		.alg_type = OP_ALG_ALGSEL_SHA256,
4311 	}, {
4312 		.name = "sha384",
4313 		.driver_name = "sha384-caam-qi2",
4314 		.hmac_name = "hmac(sha384)",
4315 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4316 		.blocksize = SHA384_BLOCK_SIZE,
4317 		.template_ahash = {
4318 			.init = ahash_init,
4319 			.update = ahash_update,
4320 			.final = ahash_final,
4321 			.finup = ahash_finup,
4322 			.digest = ahash_digest,
4323 			.export = ahash_export,
4324 			.import = ahash_import,
4325 			.setkey = ahash_setkey,
4326 			.halg = {
4327 				.digestsize = SHA384_DIGEST_SIZE,
4328 				.statesize = sizeof(struct caam_export_state),
4329 			},
4330 		},
4331 		.alg_type = OP_ALG_ALGSEL_SHA384,
4332 	}, {
4333 		.name = "sha512",
4334 		.driver_name = "sha512-caam-qi2",
4335 		.hmac_name = "hmac(sha512)",
4336 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4337 		.blocksize = SHA512_BLOCK_SIZE,
4338 		.template_ahash = {
4339 			.init = ahash_init,
4340 			.update = ahash_update,
4341 			.final = ahash_final,
4342 			.finup = ahash_finup,
4343 			.digest = ahash_digest,
4344 			.export = ahash_export,
4345 			.import = ahash_import,
4346 			.setkey = ahash_setkey,
4347 			.halg = {
4348 				.digestsize = SHA512_DIGEST_SIZE,
4349 				.statesize = sizeof(struct caam_export_state),
4350 			},
4351 		},
4352 		.alg_type = OP_ALG_ALGSEL_SHA512,
4353 	}, {
4354 		.name = "md5",
4355 		.driver_name = "md5-caam-qi2",
4356 		.hmac_name = "hmac(md5)",
4357 		.hmac_driver_name = "hmac-md5-caam-qi2",
4358 		.blocksize = MD5_BLOCK_WORDS * 4,
4359 		.template_ahash = {
4360 			.init = ahash_init,
4361 			.update = ahash_update,
4362 			.final = ahash_final,
4363 			.finup = ahash_finup,
4364 			.digest = ahash_digest,
4365 			.export = ahash_export,
4366 			.import = ahash_import,
4367 			.setkey = ahash_setkey,
4368 			.halg = {
4369 				.digestsize = MD5_DIGEST_SIZE,
4370 				.statesize = sizeof(struct caam_export_state),
4371 			},
4372 		},
4373 		.alg_type = OP_ALG_ALGSEL_MD5,
4374 	}
4375 };
4376 
4377 struct caam_hash_alg {
4378 	struct list_head entry;
4379 	struct device *dev;
4380 	int alg_type;
4381 	struct ahash_alg ahash_alg;
4382 };
4383 
4384 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4385 {
4386 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4387 	struct crypto_alg *base = tfm->__crt_alg;
4388 	struct hash_alg_common *halg =
4389 		 container_of(base, struct hash_alg_common, base);
4390 	struct ahash_alg *alg =
4391 		 container_of(halg, struct ahash_alg, halg);
4392 	struct caam_hash_alg *caam_hash =
4393 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4394 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4395 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4396 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4397 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4398 					 HASH_MSG_LEN + 32,
4399 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4400 					 HASH_MSG_LEN + 64,
4401 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4402 	dma_addr_t dma_addr;
4403 	int i;
4404 
4405 	ctx->dev = caam_hash->dev;
4406 
4407 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4408 					DMA_BIDIRECTIONAL,
4409 					DMA_ATTR_SKIP_CPU_SYNC);
4410 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4411 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4412 		return -ENOMEM;
4413 	}
4414 
4415 	for (i = 0; i < HASH_NUM_OP; i++)
4416 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4417 
4418 	/* copy descriptor header template value */
4419 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4420 
4421 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4422 				   OP_ALG_ALGSEL_SUBMASK) >>
4423 				  OP_ALG_ALGSEL_SHIFT];
4424 
4425 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4426 				 sizeof(struct caam_hash_state));
4427 
4428 	return ahash_set_sh_desc(ahash);
4429 }
4430 
4431 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4432 {
4433 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4434 
4435 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4436 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4437 }
4438 
4439 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4440 	struct caam_hash_template *template, bool keyed)
4441 {
4442 	struct caam_hash_alg *t_alg;
4443 	struct ahash_alg *halg;
4444 	struct crypto_alg *alg;
4445 
4446 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4447 	if (!t_alg)
4448 		return ERR_PTR(-ENOMEM);
4449 
4450 	t_alg->ahash_alg = template->template_ahash;
4451 	halg = &t_alg->ahash_alg;
4452 	alg = &halg->halg.base;
4453 
4454 	if (keyed) {
4455 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4456 			 template->hmac_name);
4457 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4458 			 template->hmac_driver_name);
4459 	} else {
4460 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4461 			 template->name);
4462 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4463 			 template->driver_name);
4464 		t_alg->ahash_alg.setkey = NULL;
4465 	}
4466 	alg->cra_module = THIS_MODULE;
4467 	alg->cra_init = caam_hash_cra_init;
4468 	alg->cra_exit = caam_hash_cra_exit;
4469 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4470 	alg->cra_priority = CAAM_CRA_PRIORITY;
4471 	alg->cra_blocksize = template->blocksize;
4472 	alg->cra_alignmask = 0;
4473 	alg->cra_flags = CRYPTO_ALG_ASYNC;
4474 
4475 	t_alg->alg_type = template->alg_type;
4476 	t_alg->dev = dev;
4477 
4478 	return t_alg;
4479 }
4480 
4481 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4482 {
4483 	struct dpaa2_caam_priv_per_cpu *ppriv;
4484 
4485 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4486 	napi_schedule_irqoff(&ppriv->napi);
4487 }
4488 
4489 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4490 {
4491 	struct device *dev = priv->dev;
4492 	struct dpaa2_io_notification_ctx *nctx;
4493 	struct dpaa2_caam_priv_per_cpu *ppriv;
4494 	int err, i = 0, cpu;
4495 
4496 	for_each_online_cpu(cpu) {
4497 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4498 		ppriv->priv = priv;
4499 		nctx = &ppriv->nctx;
4500 		nctx->is_cdan = 0;
4501 		nctx->id = ppriv->rsp_fqid;
4502 		nctx->desired_cpu = cpu;
4503 		nctx->cb = dpaa2_caam_fqdan_cb;
4504 
4505 		/* Register notification callbacks */
4506 		err = dpaa2_io_service_register(NULL, nctx);
4507 		if (unlikely(err)) {
4508 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4509 			nctx->cb = NULL;
4510 			/*
4511 			 * If no affine DPIO for this core, there's probably
4512 			 * none available for next cores either. Signal we want
4513 			 * to retry later, in case the DPIO devices weren't
4514 			 * probed yet.
4515 			 */
4516 			err = -EPROBE_DEFER;
4517 			goto err;
4518 		}
4519 
4520 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4521 						     dev);
4522 		if (unlikely(!ppriv->store)) {
4523 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4524 			err = -ENOMEM;
4525 			goto err;
4526 		}
4527 
4528 		if (++i == priv->num_pairs)
4529 			break;
4530 	}
4531 
4532 	return 0;
4533 
4534 err:
4535 	for_each_online_cpu(cpu) {
4536 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4537 		if (!ppriv->nctx.cb)
4538 			break;
4539 		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4540 	}
4541 
4542 	for_each_online_cpu(cpu) {
4543 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4544 		if (!ppriv->store)
4545 			break;
4546 		dpaa2_io_store_destroy(ppriv->store);
4547 	}
4548 
4549 	return err;
4550 }
4551 
4552 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4553 {
4554 	struct dpaa2_caam_priv_per_cpu *ppriv;
4555 	int i = 0, cpu;
4556 
4557 	for_each_online_cpu(cpu) {
4558 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4559 		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4560 		dpaa2_io_store_destroy(ppriv->store);
4561 
4562 		if (++i == priv->num_pairs)
4563 			return;
4564 	}
4565 }
4566 
4567 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4568 {
4569 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4570 	struct device *dev = priv->dev;
4571 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4572 	struct dpaa2_caam_priv_per_cpu *ppriv;
4573 	int err = 0, i = 0, cpu;
4574 
4575 	/* Configure Rx queues */
4576 	for_each_online_cpu(cpu) {
4577 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4578 
4579 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4580 				       DPSECI_QUEUE_OPT_USER_CTX;
4581 		rx_queue_cfg.order_preservation_en = 0;
4582 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4583 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4584 		/*
4585 		 * Rx priority (WQ) doesn't really matter, since we use
4586 		 * pull mode, i.e. volatile dequeues from specific FQs
4587 		 */
4588 		rx_queue_cfg.dest_cfg.priority = 0;
4589 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4590 
4591 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4592 					  &rx_queue_cfg);
4593 		if (err) {
4594 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4595 				err);
4596 			return err;
4597 		}
4598 
4599 		if (++i == priv->num_pairs)
4600 			break;
4601 	}
4602 
4603 	return err;
4604 }
4605 
4606 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4607 {
4608 	struct device *dev = priv->dev;
4609 
4610 	if (!priv->cscn_mem)
4611 		return;
4612 
4613 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4614 	kfree(priv->cscn_mem);
4615 }
4616 
4617 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4618 {
4619 	struct device *dev = priv->dev;
4620 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4621 
4622 	dpaa2_dpseci_congestion_free(priv);
4623 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4624 }
4625 
4626 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4627 				  const struct dpaa2_fd *fd)
4628 {
4629 	struct caam_request *req;
4630 	u32 fd_err;
4631 
4632 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4633 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4634 		return;
4635 	}
4636 
4637 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4638 	if (unlikely(fd_err))
4639 		dev_err(priv->dev, "FD error: %08x\n", fd_err);
4640 
4641 	/*
4642 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4643 	 * in FD[ERR] or FD[FRC].
4644 	 */
4645 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4646 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4647 			 DMA_BIDIRECTIONAL);
4648 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4649 }
4650 
4651 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4652 {
4653 	int err;
4654 
4655 	/* Retry while portal is busy */
4656 	do {
4657 		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
4658 					       ppriv->store);
4659 	} while (err == -EBUSY);
4660 
4661 	if (unlikely(err))
4662 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4663 
4664 	return err;
4665 }
4666 
4667 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4668 {
4669 	struct dpaa2_dq *dq;
4670 	int cleaned = 0, is_last;
4671 
4672 	do {
4673 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4674 		if (unlikely(!dq)) {
4675 			if (unlikely(!is_last)) {
4676 				dev_dbg(ppriv->priv->dev,
4677 					"FQ %d returned no valid frames\n",
4678 					ppriv->rsp_fqid);
4679 				/*
4680 				 * MUST retry until we get some sort of
4681 				 * valid response token (be it "empty dequeue"
4682 				 * or a valid frame).
4683 				 */
4684 				continue;
4685 			}
4686 			break;
4687 		}
4688 
4689 		/* Process FD */
4690 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4691 		cleaned++;
4692 	} while (!is_last);
4693 
4694 	return cleaned;
4695 }
4696 
4697 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4698 {
4699 	struct dpaa2_caam_priv_per_cpu *ppriv;
4700 	struct dpaa2_caam_priv *priv;
4701 	int err, cleaned = 0, store_cleaned;
4702 
4703 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4704 	priv = ppriv->priv;
4705 
4706 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4707 		return 0;
4708 
4709 	do {
4710 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4711 		cleaned += store_cleaned;
4712 
4713 		if (store_cleaned == 0 ||
4714 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4715 			break;
4716 
4717 		/* Try to dequeue some more */
4718 		err = dpaa2_caam_pull_fq(ppriv);
4719 		if (unlikely(err))
4720 			break;
4721 	} while (1);
4722 
4723 	if (cleaned < budget) {
4724 		napi_complete_done(napi, cleaned);
4725 		err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
4726 		if (unlikely(err))
4727 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4728 				err);
4729 	}
4730 
4731 	return cleaned;
4732 }
4733 
4734 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4735 					 u16 token)
4736 {
4737 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4738 	struct device *dev = priv->dev;
4739 	int err;
4740 
4741 	/*
4742 	 * Congestion group feature supported starting with DPSECI API v5.1
4743 	 * and only when object has been created with this capability.
4744 	 */
4745 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4746 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4747 		return 0;
4748 
4749 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4750 				 GFP_KERNEL | GFP_DMA);
4751 	if (!priv->cscn_mem)
4752 		return -ENOMEM;
4753 
4754 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4755 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4756 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4757 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4758 		dev_err(dev, "Error mapping CSCN memory area\n");
4759 		err = -ENOMEM;
4760 		goto err_dma_map;
4761 	}
4762 
4763 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4764 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4765 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4766 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4767 	cong_notif_cfg.message_iova = priv->cscn_dma;
4768 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4769 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4770 					DPSECI_CGN_MODE_COHERENT_WRITE;
4771 
4772 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4773 						 &cong_notif_cfg);
4774 	if (err) {
4775 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4776 		goto err_set_cong;
4777 	}
4778 
4779 	return 0;
4780 
4781 err_set_cong:
4782 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4783 err_dma_map:
4784 	kfree(priv->cscn_mem);
4785 
4786 	return err;
4787 }
4788 
4789 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4790 {
4791 	struct device *dev = &ls_dev->dev;
4792 	struct dpaa2_caam_priv *priv;
4793 	struct dpaa2_caam_priv_per_cpu *ppriv;
4794 	int err, cpu;
4795 	u8 i;
4796 
4797 	priv = dev_get_drvdata(dev);
4798 
4799 	priv->dev = dev;
4800 	priv->dpsec_id = ls_dev->obj_desc.id;
4801 
4802 	/* Get a handle for the DPSECI this interface is associate with */
4803 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4804 	if (err) {
4805 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4806 		goto err_open;
4807 	}
4808 
4809 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4810 				     &priv->minor_ver);
4811 	if (err) {
4812 		dev_err(dev, "dpseci_get_api_version() failed\n");
4813 		goto err_get_vers;
4814 	}
4815 
4816 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4817 
4818 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4819 				    &priv->dpseci_attr);
4820 	if (err) {
4821 		dev_err(dev, "dpseci_get_attributes() failed\n");
4822 		goto err_get_vers;
4823 	}
4824 
4825 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4826 				  &priv->sec_attr);
4827 	if (err) {
4828 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4829 		goto err_get_vers;
4830 	}
4831 
4832 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4833 	if (err) {
4834 		dev_err(dev, "setup_congestion() failed\n");
4835 		goto err_get_vers;
4836 	}
4837 
4838 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4839 			      priv->dpseci_attr.num_tx_queues);
4840 	if (priv->num_pairs > num_online_cpus()) {
4841 		dev_warn(dev, "%d queues won't be used\n",
4842 			 priv->num_pairs - num_online_cpus());
4843 		priv->num_pairs = num_online_cpus();
4844 	}
4845 
4846 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4847 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4848 					  &priv->rx_queue_attr[i]);
4849 		if (err) {
4850 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4851 			goto err_get_rx_queue;
4852 		}
4853 	}
4854 
4855 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4856 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4857 					  &priv->tx_queue_attr[i]);
4858 		if (err) {
4859 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4860 			goto err_get_rx_queue;
4861 		}
4862 	}
4863 
4864 	i = 0;
4865 	for_each_online_cpu(cpu) {
4866 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
4867 			priv->rx_queue_attr[i].fqid,
4868 			priv->tx_queue_attr[i].fqid);
4869 
4870 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4871 		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
4872 		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
4873 		ppriv->prio = i;
4874 
4875 		ppriv->net_dev.dev = *dev;
4876 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4877 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4878 			       DPAA2_CAAM_NAPI_WEIGHT);
4879 		if (++i == priv->num_pairs)
4880 			break;
4881 	}
4882 
4883 	return 0;
4884 
4885 err_get_rx_queue:
4886 	dpaa2_dpseci_congestion_free(priv);
4887 err_get_vers:
4888 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4889 err_open:
4890 	return err;
4891 }
4892 
4893 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4894 {
4895 	struct device *dev = priv->dev;
4896 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4897 	struct dpaa2_caam_priv_per_cpu *ppriv;
4898 	int i;
4899 
4900 	for (i = 0; i < priv->num_pairs; i++) {
4901 		ppriv = per_cpu_ptr(priv->ppriv, i);
4902 		napi_enable(&ppriv->napi);
4903 	}
4904 
4905 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4906 }
4907 
4908 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4909 {
4910 	struct device *dev = priv->dev;
4911 	struct dpaa2_caam_priv_per_cpu *ppriv;
4912 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4913 	int i, err = 0, enabled;
4914 
4915 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4916 	if (err) {
4917 		dev_err(dev, "dpseci_disable() failed\n");
4918 		return err;
4919 	}
4920 
4921 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4922 	if (err) {
4923 		dev_err(dev, "dpseci_is_enabled() failed\n");
4924 		return err;
4925 	}
4926 
4927 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4928 
4929 	for (i = 0; i < priv->num_pairs; i++) {
4930 		ppriv = per_cpu_ptr(priv->ppriv, i);
4931 		napi_disable(&ppriv->napi);
4932 		netif_napi_del(&ppriv->napi);
4933 	}
4934 
4935 	return 0;
4936 }
4937 
4938 static struct list_head hash_list;
4939 
4940 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4941 {
4942 	struct device *dev;
4943 	struct dpaa2_caam_priv *priv;
4944 	int i, err = 0;
4945 	bool registered = false;
4946 
4947 	/*
4948 	 * There is no way to get CAAM endianness - there is no direct register
4949 	 * space access and MC f/w does not provide this attribute.
4950 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4951 	 * property.
4952 	 */
4953 	caam_little_end = true;
4954 
4955 	caam_imx = false;
4956 
4957 	dev = &dpseci_dev->dev;
4958 
4959 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4960 	if (!priv)
4961 		return -ENOMEM;
4962 
4963 	dev_set_drvdata(dev, priv);
4964 
4965 	priv->domain = iommu_get_domain_for_dev(dev);
4966 
4967 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4968 				     0, SLAB_CACHE_DMA, NULL);
4969 	if (!qi_cache) {
4970 		dev_err(dev, "Can't allocate SEC cache\n");
4971 		return -ENOMEM;
4972 	}
4973 
4974 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
4975 	if (err) {
4976 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
4977 		goto err_dma_mask;
4978 	}
4979 
4980 	/* Obtain a MC portal */
4981 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
4982 	if (err) {
4983 		if (err == -ENXIO)
4984 			err = -EPROBE_DEFER;
4985 		else
4986 			dev_err(dev, "MC portal allocation failed\n");
4987 
4988 		goto err_dma_mask;
4989 	}
4990 
4991 	priv->ppriv = alloc_percpu(*priv->ppriv);
4992 	if (!priv->ppriv) {
4993 		dev_err(dev, "alloc_percpu() failed\n");
4994 		err = -ENOMEM;
4995 		goto err_alloc_ppriv;
4996 	}
4997 
4998 	/* DPSECI initialization */
4999 	err = dpaa2_dpseci_setup(dpseci_dev);
5000 	if (err) {
5001 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5002 		goto err_dpseci_setup;
5003 	}
5004 
5005 	/* DPIO */
5006 	err = dpaa2_dpseci_dpio_setup(priv);
5007 	if (err) {
5008 		if (err != -EPROBE_DEFER)
5009 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5010 		goto err_dpio_setup;
5011 	}
5012 
5013 	/* DPSECI binding to DPIO */
5014 	err = dpaa2_dpseci_bind(priv);
5015 	if (err) {
5016 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5017 		goto err_bind;
5018 	}
5019 
5020 	/* DPSECI enable */
5021 	err = dpaa2_dpseci_enable(priv);
5022 	if (err) {
5023 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5024 		goto err_bind;
5025 	}
5026 
5027 	/* register crypto algorithms the device supports */
5028 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5029 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5030 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5031 
5032 		/* Skip DES algorithms if not supported by device */
5033 		if (!priv->sec_attr.des_acc_num &&
5034 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5035 		     alg_sel == OP_ALG_ALGSEL_DES))
5036 			continue;
5037 
5038 		/* Skip AES algorithms if not supported by device */
5039 		if (!priv->sec_attr.aes_acc_num &&
5040 		    alg_sel == OP_ALG_ALGSEL_AES)
5041 			continue;
5042 
5043 		/* Skip CHACHA20 algorithms if not supported by device */
5044 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5045 		    !priv->sec_attr.ccha_acc_num)
5046 			continue;
5047 
5048 		t_alg->caam.dev = dev;
5049 		caam_skcipher_alg_init(t_alg);
5050 
5051 		err = crypto_register_skcipher(&t_alg->skcipher);
5052 		if (err) {
5053 			dev_warn(dev, "%s alg registration failed: %d\n",
5054 				 t_alg->skcipher.base.cra_driver_name, err);
5055 			continue;
5056 		}
5057 
5058 		t_alg->registered = true;
5059 		registered = true;
5060 	}
5061 
5062 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5063 		struct caam_aead_alg *t_alg = driver_aeads + i;
5064 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5065 				 OP_ALG_ALGSEL_MASK;
5066 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5067 				 OP_ALG_ALGSEL_MASK;
5068 
5069 		/* Skip DES algorithms if not supported by device */
5070 		if (!priv->sec_attr.des_acc_num &&
5071 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5072 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5073 			continue;
5074 
5075 		/* Skip AES algorithms if not supported by device */
5076 		if (!priv->sec_attr.aes_acc_num &&
5077 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5078 			continue;
5079 
5080 		/* Skip CHACHA20 algorithms if not supported by device */
5081 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5082 		    !priv->sec_attr.ccha_acc_num)
5083 			continue;
5084 
5085 		/* Skip POLY1305 algorithms if not supported by device */
5086 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5087 		    !priv->sec_attr.ptha_acc_num)
5088 			continue;
5089 
5090 		/*
5091 		 * Skip algorithms requiring message digests
5092 		 * if MD not supported by device.
5093 		 */
5094 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5095 		    !priv->sec_attr.md_acc_num)
5096 			continue;
5097 
5098 		t_alg->caam.dev = dev;
5099 		caam_aead_alg_init(t_alg);
5100 
5101 		err = crypto_register_aead(&t_alg->aead);
5102 		if (err) {
5103 			dev_warn(dev, "%s alg registration failed: %d\n",
5104 				 t_alg->aead.base.cra_driver_name, err);
5105 			continue;
5106 		}
5107 
5108 		t_alg->registered = true;
5109 		registered = true;
5110 	}
5111 	if (registered)
5112 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5113 
5114 	/* register hash algorithms the device supports */
5115 	INIT_LIST_HEAD(&hash_list);
5116 
5117 	/*
5118 	 * Skip registration of any hashing algorithms if MD block
5119 	 * is not present.
5120 	 */
5121 	if (!priv->sec_attr.md_acc_num)
5122 		return 0;
5123 
5124 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5125 		struct caam_hash_alg *t_alg;
5126 		struct caam_hash_template *alg = driver_hash + i;
5127 
5128 		/* register hmac version */
5129 		t_alg = caam_hash_alloc(dev, alg, true);
5130 		if (IS_ERR(t_alg)) {
5131 			err = PTR_ERR(t_alg);
5132 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5133 				 alg->driver_name, err);
5134 			continue;
5135 		}
5136 
5137 		err = crypto_register_ahash(&t_alg->ahash_alg);
5138 		if (err) {
5139 			dev_warn(dev, "%s alg registration failed: %d\n",
5140 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5141 				 err);
5142 			kfree(t_alg);
5143 		} else {
5144 			list_add_tail(&t_alg->entry, &hash_list);
5145 		}
5146 
5147 		/* register unkeyed version */
5148 		t_alg = caam_hash_alloc(dev, alg, false);
5149 		if (IS_ERR(t_alg)) {
5150 			err = PTR_ERR(t_alg);
5151 			dev_warn(dev, "%s alg allocation failed: %d\n",
5152 				 alg->driver_name, err);
5153 			continue;
5154 		}
5155 
5156 		err = crypto_register_ahash(&t_alg->ahash_alg);
5157 		if (err) {
5158 			dev_warn(dev, "%s alg registration failed: %d\n",
5159 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5160 				 err);
5161 			kfree(t_alg);
5162 		} else {
5163 			list_add_tail(&t_alg->entry, &hash_list);
5164 		}
5165 	}
5166 	if (!list_empty(&hash_list))
5167 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5168 
5169 	return err;
5170 
5171 err_bind:
5172 	dpaa2_dpseci_dpio_free(priv);
5173 err_dpio_setup:
5174 	dpaa2_dpseci_free(priv);
5175 err_dpseci_setup:
5176 	free_percpu(priv->ppriv);
5177 err_alloc_ppriv:
5178 	fsl_mc_portal_free(priv->mc_io);
5179 err_dma_mask:
5180 	kmem_cache_destroy(qi_cache);
5181 
5182 	return err;
5183 }
5184 
5185 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5186 {
5187 	struct device *dev;
5188 	struct dpaa2_caam_priv *priv;
5189 	int i;
5190 
5191 	dev = &ls_dev->dev;
5192 	priv = dev_get_drvdata(dev);
5193 
5194 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5195 		struct caam_aead_alg *t_alg = driver_aeads + i;
5196 
5197 		if (t_alg->registered)
5198 			crypto_unregister_aead(&t_alg->aead);
5199 	}
5200 
5201 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5202 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5203 
5204 		if (t_alg->registered)
5205 			crypto_unregister_skcipher(&t_alg->skcipher);
5206 	}
5207 
5208 	if (hash_list.next) {
5209 		struct caam_hash_alg *t_hash_alg, *p;
5210 
5211 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5212 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5213 			list_del(&t_hash_alg->entry);
5214 			kfree(t_hash_alg);
5215 		}
5216 	}
5217 
5218 	dpaa2_dpseci_disable(priv);
5219 	dpaa2_dpseci_dpio_free(priv);
5220 	dpaa2_dpseci_free(priv);
5221 	free_percpu(priv->ppriv);
5222 	fsl_mc_portal_free(priv->mc_io);
5223 	kmem_cache_destroy(qi_cache);
5224 
5225 	return 0;
5226 }
5227 
5228 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5229 {
5230 	struct dpaa2_fd fd;
5231 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5232 	int err = 0, i, id;
5233 
5234 	if (IS_ERR(req))
5235 		return PTR_ERR(req);
5236 
5237 	if (priv->cscn_mem) {
5238 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5239 					DPAA2_CSCN_SIZE,
5240 					DMA_FROM_DEVICE);
5241 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5242 			dev_dbg_ratelimited(dev, "Dropping request\n");
5243 			return -EBUSY;
5244 		}
5245 	}
5246 
5247 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5248 
5249 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5250 					 DMA_BIDIRECTIONAL);
5251 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5252 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5253 		goto err_out;
5254 	}
5255 
5256 	memset(&fd, 0, sizeof(fd));
5257 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5258 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5259 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5260 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5261 
5262 	/*
5263 	 * There is no guarantee that preemption is disabled here,
5264 	 * thus take action.
5265 	 */
5266 	preempt_disable();
5267 	id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
5268 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5269 		err = dpaa2_io_service_enqueue_fq(NULL,
5270 						  priv->tx_queue_attr[id].fqid,
5271 						  &fd);
5272 		if (err != -EBUSY)
5273 			break;
5274 	}
5275 	preempt_enable();
5276 
5277 	if (unlikely(err)) {
5278 		dev_err(dev, "Error enqueuing frame: %d\n", err);
5279 		goto err_out;
5280 	}
5281 
5282 	return -EINPROGRESS;
5283 
5284 err_out:
5285 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5286 			 DMA_BIDIRECTIONAL);
5287 	return -EIO;
5288 }
5289 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5290 
5291 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5292 	{
5293 		.vendor = FSL_MC_VENDOR_FREESCALE,
5294 		.obj_type = "dpseci",
5295 	},
5296 	{ .vendor = 0x0 }
5297 };
5298 
5299 static struct fsl_mc_driver dpaa2_caam_driver = {
5300 	.driver = {
5301 		.name		= KBUILD_MODNAME,
5302 		.owner		= THIS_MODULE,
5303 	},
5304 	.probe		= dpaa2_caam_probe,
5305 	.remove		= dpaa2_caam_remove,
5306 	.match_id_table = dpaa2_caam_match_id_table
5307 };
5308 
5309 MODULE_LICENSE("Dual BSD/GPL");
5310 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5311 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5312 
5313 module_fsl_mc_driver(dpaa2_caam_driver);
5314