1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2018 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include <linux/fsl/mc.h>
19 #include <soc/fsl/dpaa2-io.h>
20 #include <soc/fsl/dpaa2-fd.h>
21 
22 #define CAAM_CRA_PRIORITY	2000
23 
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
25 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 				 SHA512_DIGEST_SIZE * 2)
27 
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa2-eth driver. However,
 *       it would pose a problem for userspace applications, which cannot know
 *       about this limitation. So for now, this approach will do.
 * NOTE: The memcache is SMP-safe. There is no need to handle spinlocks here.
 */
37 static struct kmem_cache *qi_cache;
38 
39 struct caam_alg_entry {
40 	struct device *dev;
41 	int class1_alg_type;
42 	int class2_alg_type;
43 	bool rfc3686;
44 	bool geniv;
45 };
46 
47 struct caam_aead_alg {
48 	struct aead_alg aead;
49 	struct caam_alg_entry caam;
50 	bool registered;
51 };
52 
53 struct caam_skcipher_alg {
54 	struct skcipher_alg skcipher;
55 	struct caam_alg_entry caam;
56 	bool registered;
57 };
58 
/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: virtual address of the key(s): [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
71 struct caam_ctx {
72 	struct caam_flc flc[NUM_OP];
73 	u8 key[CAAM_MAX_KEY_SIZE];
74 	dma_addr_t flc_dma[NUM_OP];
75 	dma_addr_t key_dma;
76 	enum dma_data_direction dir;
77 	struct device *dev;
78 	struct alginfo adata;
79 	struct alginfo cdata;
80 	unsigned int authsize;
81 };
82 
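/*
 * Translate an I/O virtual address from a dequeued frame back to a kernel
 * virtual address: when an IOMMU domain is attached to the dpseci object,
 * go through the IOMMU to obtain the physical address first.
 */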
83 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
84 				     dma_addr_t iova_addr)
85 {
86 	phys_addr_t phys_addr;
87 
88 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
89 				   iova_addr;
90 
91 	return phys_to_virt(phys_addr);
92 }
93 
/*
 * qi_cache_zalloc - Allocate buffers from the CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
106 static inline void *qi_cache_zalloc(gfp_t flags)
107 {
108 	return kmem_cache_zalloc(qi_cache, flags);
109 }
110 
/*
 * qi_cache_free - Free buffers allocated from the CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call is passed straight through to
 * kmem_cache_free(...)
 */
119 static inline void qi_cache_free(void *obj)
120 {
121 	kmem_cache_free(qi_cache, obj);
122 }
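
/*
 * A typical hotpath usage pattern - the extended descriptor allocations
 * below follow it:
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */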
123 
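/*
 * Retrieve the driver's per-request context (struct caam_request) from a
 * generic crypto_async_request, based on the algorithm type of its tfm.
 */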
124 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
125 {
126 	switch (crypto_tfm_alg_type(areq->tfm)) {
127 	case CRYPTO_ALG_TYPE_SKCIPHER:
128 		return skcipher_request_ctx(skcipher_request_cast(areq));
129 	case CRYPTO_ALG_TYPE_AEAD:
130 		return aead_request_ctx(container_of(areq, struct aead_request,
131 						     base));
132 	case CRYPTO_ALG_TYPE_AHASH:
133 		return ahash_request_ctx(ahash_request_cast(areq));
134 	default:
135 		return ERR_PTR(-EINVAL);
136 	}
137 }
138 
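/*
 * Undo the DMA mappings created for a request: source / destination
 * scatterlists, the IV buffer and the QMan S/G table, where present.
 */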
139 static void caam_unmap(struct device *dev, struct scatterlist *src,
140 		       struct scatterlist *dst, int src_nents,
141 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
142 		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
143 {
144 	if (dst != src) {
145 		if (src_nents)
146 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
147 		if (dst_nents)
148 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
149 	} else {
150 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
151 	}
152 
153 	if (iv_dma)
154 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
155 
156 	if (qm_sg_bytes)
157 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
158 }
159 
160 static int aead_set_sh_desc(struct crypto_aead *aead)
161 {
162 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
163 						 typeof(*alg), aead);
164 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
165 	unsigned int ivsize = crypto_aead_ivsize(aead);
166 	struct device *dev = ctx->dev;
167 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
168 	struct caam_flc *flc;
169 	u32 *desc;
170 	u32 ctx1_iv_off = 0;
171 	u32 *nonce = NULL;
172 	unsigned int data_len[2];
173 	u32 inl_mask;
174 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
175 			       OP_ALG_AAI_CTR_MOD128);
176 	const bool is_rfc3686 = alg->caam.rfc3686;
177 
178 	if (!ctx->cdata.keylen || !ctx->authsize)
179 		return 0;
180 
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
186 	if (ctr_mode)
187 		ctx1_iv_off = 16;
188 
189 	/*
190 	 * RFC3686 specific:
191 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
192 	 */
193 	if (is_rfc3686) {
194 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
195 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
196 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
197 	}
198 
199 	data_len[0] = ctx->adata.keylen_pad;
200 	data_len[1] = ctx->cdata.keylen;
201 
202 	/* aead_encrypt shared descriptor */
203 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
204 						 DESC_QI_AEAD_ENC_LEN) +
205 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
206 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
207 			      ARRAY_SIZE(data_len)) < 0)
208 		return -EINVAL;
209 
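	/*
	 * desc_inline_query() sets a bit in inl_mask for each key that fits
	 * inline in the shared descriptor: bit 0 - authentication (split) key,
	 * bit 1 - encryption key. Inlined keys are referenced by virtual
	 * address, the others by DMA address.
	 */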
210 	if (inl_mask & 1)
211 		ctx->adata.key_virt = ctx->key;
212 	else
213 		ctx->adata.key_dma = ctx->key_dma;
214 
215 	if (inl_mask & 2)
216 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
217 	else
218 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
219 
220 	ctx->adata.key_inline = !!(inl_mask & 1);
221 	ctx->cdata.key_inline = !!(inl_mask & 2);
222 
223 	flc = &ctx->flc[ENCRYPT];
224 	desc = flc->sh_desc;
225 
226 	if (alg->caam.geniv)
227 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
228 					  ivsize, ctx->authsize, is_rfc3686,
229 					  nonce, ctx1_iv_off, true,
230 					  priv->sec_attr.era);
231 	else
232 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
233 				       ivsize, ctx->authsize, is_rfc3686, nonce,
234 				       ctx1_iv_off, true, priv->sec_attr.era);
235 
236 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
237 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
238 				   sizeof(flc->flc) + desc_bytes(desc),
239 				   ctx->dir);
240 
241 	/* aead_decrypt shared descriptor */
242 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
243 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
244 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
245 			      ARRAY_SIZE(data_len)) < 0)
246 		return -EINVAL;
247 
248 	if (inl_mask & 1)
249 		ctx->adata.key_virt = ctx->key;
250 	else
251 		ctx->adata.key_dma = ctx->key_dma;
252 
253 	if (inl_mask & 2)
254 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
255 	else
256 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
257 
258 	ctx->adata.key_inline = !!(inl_mask & 1);
259 	ctx->cdata.key_inline = !!(inl_mask & 2);
260 
261 	flc = &ctx->flc[DECRYPT];
262 	desc = flc->sh_desc;
263 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
264 			       ivsize, ctx->authsize, alg->caam.geniv,
265 			       is_rfc3686, nonce, ctx1_iv_off, true,
266 			       priv->sec_attr.era);
267 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
268 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
269 				   sizeof(flc->flc) + desc_bytes(desc),
270 				   ctx->dir);
271 
272 	return 0;
273 }
274 
275 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
276 {
277 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
278 
279 	ctx->authsize = authsize;
280 	aead_set_sh_desc(authenc);
281 
282 	return 0;
283 }
284 
285 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
286 		       unsigned int keylen)
287 {
288 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
289 	struct device *dev = ctx->dev;
290 	struct crypto_authenc_keys keys;
291 
292 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
293 		goto badkey;
294 
295 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
296 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
297 		keys.authkeylen);
298 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
299 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
300 
301 	ctx->adata.keylen = keys.authkeylen;
302 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
303 					      OP_ALG_ALGSEL_MASK);
304 
305 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
306 		goto badkey;
307 
308 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
309 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
310 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
311 				   keys.enckeylen, ctx->dir);
312 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
313 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
314 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
315 
316 	ctx->cdata.keylen = keys.enckeylen;
317 
318 	memzero_explicit(&keys, sizeof(keys));
319 	return aead_set_sh_desc(aead);
320 badkey:
321 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
322 	memzero_explicit(&keys, sizeof(keys));
323 	return -EINVAL;
324 }
325 
326 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
327 					   bool encrypt)
328 {
329 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
330 	struct caam_request *req_ctx = aead_request_ctx(req);
331 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
332 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
333 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
334 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
335 						 typeof(*alg), aead);
336 	struct device *dev = ctx->dev;
337 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
338 		      GFP_KERNEL : GFP_ATOMIC;
339 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
340 	struct aead_edesc *edesc;
341 	dma_addr_t qm_sg_dma, iv_dma = 0;
342 	int ivsize = 0;
343 	unsigned int authsize = ctx->authsize;
344 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
345 	int in_len, out_len;
346 	struct dpaa2_sg_entry *sg_table;
347 
348 	/* allocate space for base edesc, link tables and IV */
349 	edesc = qi_cache_zalloc(GFP_DMA | flags);
350 	if (unlikely(!edesc)) {
351 		dev_err(dev, "could not allocate extended descriptor\n");
352 		return ERR_PTR(-ENOMEM);
353 	}
354 
355 	if (unlikely(req->dst != req->src)) {
356 		src_nents = sg_nents_for_len(req->src, req->assoclen +
357 					     req->cryptlen);
358 		if (unlikely(src_nents < 0)) {
359 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
360 				req->assoclen + req->cryptlen);
361 			qi_cache_free(edesc);
362 			return ERR_PTR(src_nents);
363 		}
364 
365 		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
366 					     req->cryptlen +
367 					     (encrypt ? authsize :
368 							(-authsize)));
369 		if (unlikely(dst_nents < 0)) {
370 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
371 				req->assoclen + req->cryptlen +
372 				(encrypt ? authsize : (-authsize)));
373 			qi_cache_free(edesc);
374 			return ERR_PTR(dst_nents);
375 		}
376 
377 		if (src_nents) {
378 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
379 						      DMA_TO_DEVICE);
380 			if (unlikely(!mapped_src_nents)) {
381 				dev_err(dev, "unable to map source\n");
382 				qi_cache_free(edesc);
383 				return ERR_PTR(-ENOMEM);
384 			}
385 		} else {
386 			mapped_src_nents = 0;
387 		}
388 
389 		if (dst_nents) {
390 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
391 						      DMA_FROM_DEVICE);
392 			if (unlikely(!mapped_dst_nents)) {
393 				dev_err(dev, "unable to map destination\n");
394 				dma_unmap_sg(dev, req->src, src_nents,
395 					     DMA_TO_DEVICE);
396 				qi_cache_free(edesc);
397 				return ERR_PTR(-ENOMEM);
398 			}
399 		} else {
400 			mapped_dst_nents = 0;
401 		}
402 	} else {
403 		src_nents = sg_nents_for_len(req->src, req->assoclen +
404 					     req->cryptlen +
405 						(encrypt ? authsize : 0));
406 		if (unlikely(src_nents < 0)) {
407 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
408 				req->assoclen + req->cryptlen +
409 				(encrypt ? authsize : 0));
410 			qi_cache_free(edesc);
411 			return ERR_PTR(src_nents);
412 		}
413 
414 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
415 					      DMA_BIDIRECTIONAL);
416 		if (unlikely(!mapped_src_nents)) {
417 			dev_err(dev, "unable to map source\n");
418 			qi_cache_free(edesc);
419 			return ERR_PTR(-ENOMEM);
420 		}
421 	}
422 
423 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
424 		ivsize = crypto_aead_ivsize(aead);
425 
426 	/*
427 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
428 	 * Input is not contiguous.
429 	 */
430 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
431 		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
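	/*
	 * One entry for the DMA-mapped assoclen, optionally one for the IV,
	 * plus the source entries; a separate multi-entry destination
	 * scatterlist has its entries appended to the same table.
	 */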
432 	sg_table = &edesc->sgt[0];
433 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
434 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
435 		     CAAM_QI_MEMCACHE_SIZE)) {
436 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
437 			qm_sg_nents, ivsize);
438 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
439 			   0, 0, 0);
440 		qi_cache_free(edesc);
441 		return ERR_PTR(-ENOMEM);
442 	}
443 
444 	if (ivsize) {
445 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
446 
447 		/* Make sure IV is located in a DMAable area */
448 		memcpy(iv, req->iv, ivsize);
449 
450 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
451 		if (dma_mapping_error(dev, iv_dma)) {
452 			dev_err(dev, "unable to map IV\n");
453 			caam_unmap(dev, req->src, req->dst, src_nents,
454 				   dst_nents, 0, 0, 0, 0);
455 			qi_cache_free(edesc);
456 			return ERR_PTR(-ENOMEM);
457 		}
458 	}
459 
460 	edesc->src_nents = src_nents;
461 	edesc->dst_nents = dst_nents;
462 	edesc->iv_dma = iv_dma;
463 
464 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
465 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data already includes the IV, but we need
		 * to skip it when we authenticate or encrypt.
		 */
470 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
471 	else
472 		edesc->assoclen = cpu_to_caam32(req->assoclen);
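	/*
	 * The (CAAM-endian) assoclen value itself is DMA-mapped and placed as
	 * the first entry of the input S/G table, so the shared descriptor
	 * can read it from the input sequence.
	 */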
473 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
474 					     DMA_TO_DEVICE);
475 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
476 		dev_err(dev, "unable to map assoclen\n");
477 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
478 			   iv_dma, ivsize, 0, 0);
479 		qi_cache_free(edesc);
480 		return ERR_PTR(-ENOMEM);
481 	}
482 
483 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
484 	qm_sg_index++;
485 	if (ivsize) {
486 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
487 		qm_sg_index++;
488 	}
489 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
490 	qm_sg_index += mapped_src_nents;
491 
492 	if (mapped_dst_nents > 1)
493 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
494 				 qm_sg_index, 0);
495 
496 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
497 	if (dma_mapping_error(dev, qm_sg_dma)) {
498 		dev_err(dev, "unable to map S/G table\n");
499 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
500 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
501 			   iv_dma, ivsize, 0, 0);
502 		qi_cache_free(edesc);
503 		return ERR_PTR(-ENOMEM);
504 	}
505 
506 	edesc->qm_sg_dma = qm_sg_dma;
507 	edesc->qm_sg_bytes = qm_sg_bytes;
508 
509 	out_len = req->assoclen + req->cryptlen +
510 		  (encrypt ? ctx->authsize : (-ctx->authsize));
511 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
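	/*
	 * Input: 4-byte assoclen entry + IV (if any) + associated data +
	 * payload. Output: associated data + payload, extended (encrypt) or
	 * shortened (decrypt) by the ICV.
	 */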
512 
513 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
514 	dpaa2_fl_set_final(in_fle, true);
515 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
516 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
517 	dpaa2_fl_set_len(in_fle, in_len);
518 
519 	if (req->dst == req->src) {
520 		if (mapped_src_nents == 1) {
521 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
522 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
523 		} else {
524 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
525 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
526 					  (1 + !!ivsize) * sizeof(*sg_table));
527 		}
528 	} else if (mapped_dst_nents == 1) {
529 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
530 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
531 	} else {
532 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
533 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
534 				  sizeof(*sg_table));
535 	}
536 
537 	dpaa2_fl_set_len(out_fle, out_len);
538 
539 	return edesc;
540 }
541 
542 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
543 {
544 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
545 	unsigned int ivsize = crypto_aead_ivsize(aead);
546 	struct device *dev = ctx->dev;
547 	struct caam_flc *flc;
548 	u32 *desc;
549 
550 	if (!ctx->cdata.keylen || !ctx->authsize)
551 		return 0;
552 
553 	flc = &ctx->flc[ENCRYPT];
554 	desc = flc->sh_desc;
555 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
556 			       ctx->authsize, true, true);
557 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
558 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
559 				   sizeof(flc->flc) + desc_bytes(desc),
560 				   ctx->dir);
561 
562 	flc = &ctx->flc[DECRYPT];
563 	desc = flc->sh_desc;
564 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
565 			       ctx->authsize, false, true);
566 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
567 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
568 				   sizeof(flc->flc) + desc_bytes(desc),
569 				   ctx->dir);
570 
571 	return 0;
572 }
573 
574 static int chachapoly_setauthsize(struct crypto_aead *aead,
575 				  unsigned int authsize)
576 {
577 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
578 
579 	if (authsize != POLY1305_DIGEST_SIZE)
580 		return -EINVAL;
581 
582 	ctx->authsize = authsize;
583 	return chachapoly_set_sh_desc(aead);
584 }
585 
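/*
 * For rfc7539(chacha20,poly1305) the full 12-byte nonce is supplied as the
 * IV, so no salt is appended to the key; for rfc7539esp the IV is 8 bytes
 * and the remaining 4 nonce bytes are carried as a salt at the end of the
 * key material.
 */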
586 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
587 			     unsigned int keylen)
588 {
589 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
590 	unsigned int ivsize = crypto_aead_ivsize(aead);
591 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
592 
593 	if (keylen != CHACHA_KEY_SIZE + saltlen) {
594 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
595 		return -EINVAL;
596 	}
597 
598 	ctx->cdata.key_virt = key;
599 	ctx->cdata.keylen = keylen - saltlen;
600 
601 	return chachapoly_set_sh_desc(aead);
602 }
603 
604 static int gcm_set_sh_desc(struct crypto_aead *aead)
605 {
606 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
607 	struct device *dev = ctx->dev;
608 	unsigned int ivsize = crypto_aead_ivsize(aead);
609 	struct caam_flc *flc;
610 	u32 *desc;
611 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
612 			ctx->cdata.keylen;
613 
614 	if (!ctx->cdata.keylen || !ctx->authsize)
615 		return 0;
616 
617 	/*
618 	 * AES GCM encrypt shared descriptor
619 	 * Job Descriptor and Shared Descriptor
620 	 * must fit into the 64-word Descriptor h/w Buffer
621 	 */
622 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
623 		ctx->cdata.key_inline = true;
624 		ctx->cdata.key_virt = ctx->key;
625 	} else {
626 		ctx->cdata.key_inline = false;
627 		ctx->cdata.key_dma = ctx->key_dma;
628 	}
629 
630 	flc = &ctx->flc[ENCRYPT];
631 	desc = flc->sh_desc;
632 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
633 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
634 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
635 				   sizeof(flc->flc) + desc_bytes(desc),
636 				   ctx->dir);
637 
638 	/*
639 	 * Job Descriptor and Shared Descriptors
640 	 * must all fit into the 64-word Descriptor h/w Buffer
641 	 */
642 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
643 		ctx->cdata.key_inline = true;
644 		ctx->cdata.key_virt = ctx->key;
645 	} else {
646 		ctx->cdata.key_inline = false;
647 		ctx->cdata.key_dma = ctx->key_dma;
648 	}
649 
650 	flc = &ctx->flc[DECRYPT];
651 	desc = flc->sh_desc;
652 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
653 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
654 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
655 				   sizeof(flc->flc) + desc_bytes(desc),
656 				   ctx->dir);
657 
658 	return 0;
659 }
660 
661 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
662 {
663 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
664 
665 	ctx->authsize = authsize;
666 	gcm_set_sh_desc(authenc);
667 
668 	return 0;
669 }
670 
671 static int gcm_setkey(struct crypto_aead *aead,
672 		      const u8 *key, unsigned int keylen)
673 {
674 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
675 	struct device *dev = ctx->dev;
676 
677 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
678 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
679 
680 	memcpy(ctx->key, key, keylen);
681 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
682 	ctx->cdata.keylen = keylen;
683 
684 	return gcm_set_sh_desc(aead);
685 }
686 
687 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
688 {
689 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
690 	struct device *dev = ctx->dev;
691 	unsigned int ivsize = crypto_aead_ivsize(aead);
692 	struct caam_flc *flc;
693 	u32 *desc;
694 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
695 			ctx->cdata.keylen;
696 
697 	if (!ctx->cdata.keylen || !ctx->authsize)
698 		return 0;
699 
700 	ctx->cdata.key_virt = ctx->key;
701 
702 	/*
703 	 * RFC4106 encrypt shared descriptor
704 	 * Job Descriptor and Shared Descriptor
705 	 * must fit into the 64-word Descriptor h/w Buffer
706 	 */
707 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
708 		ctx->cdata.key_inline = true;
709 	} else {
710 		ctx->cdata.key_inline = false;
711 		ctx->cdata.key_dma = ctx->key_dma;
712 	}
713 
714 	flc = &ctx->flc[ENCRYPT];
715 	desc = flc->sh_desc;
716 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
717 				  true);
718 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
719 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
720 				   sizeof(flc->flc) + desc_bytes(desc),
721 				   ctx->dir);
722 
723 	/*
724 	 * Job Descriptor and Shared Descriptors
725 	 * must all fit into the 64-word Descriptor h/w Buffer
726 	 */
727 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
728 		ctx->cdata.key_inline = true;
729 	} else {
730 		ctx->cdata.key_inline = false;
731 		ctx->cdata.key_dma = ctx->key_dma;
732 	}
733 
734 	flc = &ctx->flc[DECRYPT];
735 	desc = flc->sh_desc;
736 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
737 				  true);
738 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
739 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
740 				   sizeof(flc->flc) + desc_bytes(desc),
741 				   ctx->dir);
742 
743 	return 0;
744 }
745 
746 static int rfc4106_setauthsize(struct crypto_aead *authenc,
747 			       unsigned int authsize)
748 {
749 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
750 
751 	ctx->authsize = authsize;
752 	rfc4106_set_sh_desc(authenc);
753 
754 	return 0;
755 }
756 
757 static int rfc4106_setkey(struct crypto_aead *aead,
758 			  const u8 *key, unsigned int keylen)
759 {
760 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
761 	struct device *dev = ctx->dev;
762 
763 	if (keylen < 4)
764 		return -EINVAL;
765 
766 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
767 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
768 
769 	memcpy(ctx->key, key, keylen);
770 	/*
771 	 * The last four bytes of the key material are used as the salt value
772 	 * in the nonce. Update the AES key length.
773 	 */
774 	ctx->cdata.keylen = keylen - 4;
775 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
776 				   ctx->dir);
777 
778 	return rfc4106_set_sh_desc(aead);
779 }
780 
781 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
782 {
783 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
784 	struct device *dev = ctx->dev;
785 	unsigned int ivsize = crypto_aead_ivsize(aead);
786 	struct caam_flc *flc;
787 	u32 *desc;
788 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
789 			ctx->cdata.keylen;
790 
791 	if (!ctx->cdata.keylen || !ctx->authsize)
792 		return 0;
793 
794 	ctx->cdata.key_virt = ctx->key;
795 
796 	/*
797 	 * RFC4543 encrypt shared descriptor
798 	 * Job Descriptor and Shared Descriptor
799 	 * must fit into the 64-word Descriptor h/w Buffer
800 	 */
801 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
802 		ctx->cdata.key_inline = true;
803 	} else {
804 		ctx->cdata.key_inline = false;
805 		ctx->cdata.key_dma = ctx->key_dma;
806 	}
807 
808 	flc = &ctx->flc[ENCRYPT];
809 	desc = flc->sh_desc;
810 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
811 				  true);
812 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
813 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
814 				   sizeof(flc->flc) + desc_bytes(desc),
815 				   ctx->dir);
816 
817 	/*
818 	 * Job Descriptor and Shared Descriptors
819 	 * must all fit into the 64-word Descriptor h/w Buffer
820 	 */
821 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
822 		ctx->cdata.key_inline = true;
823 	} else {
824 		ctx->cdata.key_inline = false;
825 		ctx->cdata.key_dma = ctx->key_dma;
826 	}
827 
828 	flc = &ctx->flc[DECRYPT];
829 	desc = flc->sh_desc;
830 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
831 				  true);
832 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
833 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
834 				   sizeof(flc->flc) + desc_bytes(desc),
835 				   ctx->dir);
836 
837 	return 0;
838 }
839 
840 static int rfc4543_setauthsize(struct crypto_aead *authenc,
841 			       unsigned int authsize)
842 {
843 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
844 
845 	ctx->authsize = authsize;
846 	rfc4543_set_sh_desc(authenc);
847 
848 	return 0;
849 }
850 
851 static int rfc4543_setkey(struct crypto_aead *aead,
852 			  const u8 *key, unsigned int keylen)
853 {
854 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
855 	struct device *dev = ctx->dev;
856 
857 	if (keylen < 4)
858 		return -EINVAL;
859 
860 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
861 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
862 
863 	memcpy(ctx->key, key, keylen);
864 	/*
865 	 * The last four bytes of the key material are used as the salt value
866 	 * in the nonce. Update the AES key length.
867 	 */
868 	ctx->cdata.keylen = keylen - 4;
869 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
870 				   ctx->dir);
871 
872 	return rfc4543_set_sh_desc(aead);
873 }
874 
875 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
876 			   unsigned int keylen)
877 {
878 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
879 	struct caam_skcipher_alg *alg =
880 		container_of(crypto_skcipher_alg(skcipher),
881 			     struct caam_skcipher_alg, skcipher);
882 	struct device *dev = ctx->dev;
883 	struct caam_flc *flc;
884 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
885 	u32 *desc;
886 	u32 ctx1_iv_off = 0;
887 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
888 			       OP_ALG_AAI_CTR_MOD128) &&
889 			       ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
890 			       OP_ALG_ALGSEL_CHACHA20);
891 	const bool is_rfc3686 = alg->caam.rfc3686;
892 
893 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
894 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
895 
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
901 	if (ctr_mode)
902 		ctx1_iv_off = 16;
903 
904 	/*
905 	 * RFC3686 specific:
906 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
907 	 *	| *key = {KEY, NONCE}
908 	 */
909 	if (is_rfc3686) {
910 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
911 		keylen -= CTR_RFC3686_NONCE_SIZE;
912 	}
913 
914 	ctx->cdata.keylen = keylen;
915 	ctx->cdata.key_virt = key;
916 	ctx->cdata.key_inline = true;
917 
918 	/* skcipher_encrypt shared descriptor */
919 	flc = &ctx->flc[ENCRYPT];
920 	desc = flc->sh_desc;
921 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
922 				   ctx1_iv_off);
923 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
924 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
925 				   sizeof(flc->flc) + desc_bytes(desc),
926 				   ctx->dir);
927 
928 	/* skcipher_decrypt shared descriptor */
929 	flc = &ctx->flc[DECRYPT];
930 	desc = flc->sh_desc;
931 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
932 				   ctx1_iv_off);
933 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
934 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
935 				   sizeof(flc->flc) + desc_bytes(desc),
936 				   ctx->dir);
937 
938 	return 0;
939 }
940 
941 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
942 			       unsigned int keylen)
943 {
944 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
945 	struct device *dev = ctx->dev;
946 	struct caam_flc *flc;
947 	u32 *desc;
948 
949 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
950 		dev_err(dev, "key size mismatch\n");
951 		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
952 		return -EINVAL;
953 	}
954 
955 	ctx->cdata.keylen = keylen;
956 	ctx->cdata.key_virt = key;
957 	ctx->cdata.key_inline = true;
958 
959 	/* xts_skcipher_encrypt shared descriptor */
960 	flc = &ctx->flc[ENCRYPT];
961 	desc = flc->sh_desc;
962 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
963 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
964 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
965 				   sizeof(flc->flc) + desc_bytes(desc),
966 				   ctx->dir);
967 
968 	/* xts_skcipher_decrypt shared descriptor */
969 	flc = &ctx->flc[DECRYPT];
970 	desc = flc->sh_desc;
971 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
972 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
973 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
974 				   sizeof(flc->flc) + desc_bytes(desc),
975 				   ctx->dir);
976 
977 	return 0;
978 }
979 
980 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
981 {
982 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
983 	struct caam_request *req_ctx = skcipher_request_ctx(req);
984 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
985 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
986 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
987 	struct device *dev = ctx->dev;
988 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
989 		       GFP_KERNEL : GFP_ATOMIC;
990 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
991 	struct skcipher_edesc *edesc;
992 	dma_addr_t iv_dma;
993 	u8 *iv;
994 	int ivsize = crypto_skcipher_ivsize(skcipher);
995 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
996 	struct dpaa2_sg_entry *sg_table;
997 
998 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
999 	if (unlikely(src_nents < 0)) {
1000 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1001 			req->cryptlen);
1002 		return ERR_PTR(src_nents);
1003 	}
1004 
1005 	if (unlikely(req->dst != req->src)) {
1006 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1007 		if (unlikely(dst_nents < 0)) {
1008 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1009 				req->cryptlen);
1010 			return ERR_PTR(dst_nents);
1011 		}
1012 
1013 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1014 					      DMA_TO_DEVICE);
1015 		if (unlikely(!mapped_src_nents)) {
1016 			dev_err(dev, "unable to map source\n");
1017 			return ERR_PTR(-ENOMEM);
1018 		}
1019 
1020 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1021 					      DMA_FROM_DEVICE);
1022 		if (unlikely(!mapped_dst_nents)) {
1023 			dev_err(dev, "unable to map destination\n");
1024 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1025 			return ERR_PTR(-ENOMEM);
1026 		}
1027 	} else {
1028 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1029 					      DMA_BIDIRECTIONAL);
1030 		if (unlikely(!mapped_src_nents)) {
1031 			dev_err(dev, "unable to map source\n");
1032 			return ERR_PTR(-ENOMEM);
1033 		}
1034 	}
1035 
1036 	qm_sg_ents = 1 + mapped_src_nents;
1037 	dst_sg_idx = qm_sg_ents;
1038 
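	/*
	 * Input S/G table layout: IV entry followed by the source entries;
	 * when the destination is a separate, multi-entry scatterlist, its
	 * entries are appended starting at dst_sg_idx.
	 */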
1039 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1040 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1041 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1042 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1043 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1044 			qm_sg_ents, ivsize);
1045 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1046 			   0, 0, 0);
1047 		return ERR_PTR(-ENOMEM);
1048 	}
1049 
1050 	/* allocate space for base edesc, link tables and IV */
1051 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1052 	if (unlikely(!edesc)) {
1053 		dev_err(dev, "could not allocate extended descriptor\n");
1054 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1055 			   0, 0, 0);
1056 		return ERR_PTR(-ENOMEM);
1057 	}
1058 
1059 	/* Make sure IV is located in a DMAable area */
1060 	sg_table = &edesc->sgt[0];
1061 	iv = (u8 *)(sg_table + qm_sg_ents);
1062 	memcpy(iv, req->iv, ivsize);
1063 
1064 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1065 	if (dma_mapping_error(dev, iv_dma)) {
1066 		dev_err(dev, "unable to map IV\n");
1067 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1068 			   0, 0, 0);
1069 		qi_cache_free(edesc);
1070 		return ERR_PTR(-ENOMEM);
1071 	}
1072 
1073 	edesc->src_nents = src_nents;
1074 	edesc->dst_nents = dst_nents;
1075 	edesc->iv_dma = iv_dma;
1076 	edesc->qm_sg_bytes = qm_sg_bytes;
1077 
1078 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1079 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1080 
1081 	if (mapped_dst_nents > 1)
1082 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1083 				 dst_sg_idx, 0);
1084 
1085 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1086 					  DMA_TO_DEVICE);
1087 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1088 		dev_err(dev, "unable to map S/G table\n");
1089 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1090 			   iv_dma, ivsize, 0, 0);
1091 		qi_cache_free(edesc);
1092 		return ERR_PTR(-ENOMEM);
1093 	}
1094 
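	/*
	 * fd_flt[0] holds the output frame list entry and fd_flt[1] the input
	 * one (see the out_fle/in_fle pointers above); the FINAL bit is set
	 * on the input entry.
	 */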
1095 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1096 	dpaa2_fl_set_final(in_fle, true);
1097 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1098 	dpaa2_fl_set_len(out_fle, req->cryptlen);
1099 
1100 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1101 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1102 
1103 	if (req->src == req->dst) {
1104 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1105 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1106 				  sizeof(*sg_table));
1107 	} else if (mapped_dst_nents > 1) {
1108 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1109 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1110 				  sizeof(*sg_table));
1111 	} else {
1112 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1113 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1114 	}
1115 
1116 	return edesc;
1117 }
1118 
1119 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1120 		       struct aead_request *req)
1121 {
1122 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1123 	int ivsize = crypto_aead_ivsize(aead);
1124 
1125 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1126 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1127 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1128 }
1129 
1130 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1131 			   struct skcipher_request *req)
1132 {
1133 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1134 	int ivsize = crypto_skcipher_ivsize(skcipher);
1135 
1136 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1137 		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1138 }
1139 
1140 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1141 {
1142 	struct crypto_async_request *areq = cbk_ctx;
1143 	struct aead_request *req = container_of(areq, struct aead_request,
1144 						base);
1145 	struct caam_request *req_ctx = to_caam_req(areq);
1146 	struct aead_edesc *edesc = req_ctx->edesc;
1147 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1148 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1149 	int ecode = 0;
1150 
1151 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1152 
1153 	if (unlikely(status)) {
1154 		caam_qi2_strstatus(ctx->dev, status);
1155 		ecode = -EIO;
1156 	}
1157 
1158 	aead_unmap(ctx->dev, edesc, req);
1159 	qi_cache_free(edesc);
1160 	aead_request_complete(req, ecode);
1161 }
1162 
1163 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1164 {
1165 	struct crypto_async_request *areq = cbk_ctx;
1166 	struct aead_request *req = container_of(areq, struct aead_request,
1167 						base);
1168 	struct caam_request *req_ctx = to_caam_req(areq);
1169 	struct aead_edesc *edesc = req_ctx->edesc;
1170 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1171 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1172 	int ecode = 0;
1173 
1174 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1175 
1176 	if (unlikely(status)) {
1177 		caam_qi2_strstatus(ctx->dev, status);
		/*
		 * If the hardware flagged an ICV (authentication) check
		 * failure, report it as -EBADMSG.
		 */
1181 		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1182 		     JRSTA_CCBERR_ERRID_ICVCHK)
1183 			ecode = -EBADMSG;
1184 		else
1185 			ecode = -EIO;
1186 	}
1187 
1188 	aead_unmap(ctx->dev, edesc, req);
1189 	qi_cache_free(edesc);
1190 	aead_request_complete(req, ecode);
1191 }
1192 
1193 static int aead_encrypt(struct aead_request *req)
1194 {
1195 	struct aead_edesc *edesc;
1196 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1197 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1198 	struct caam_request *caam_req = aead_request_ctx(req);
1199 	int ret;
1200 
1201 	/* allocate extended descriptor */
1202 	edesc = aead_edesc_alloc(req, true);
1203 	if (IS_ERR(edesc))
1204 		return PTR_ERR(edesc);
1205 
1206 	caam_req->flc = &ctx->flc[ENCRYPT];
1207 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1208 	caam_req->cbk = aead_encrypt_done;
1209 	caam_req->ctx = &req->base;
1210 	caam_req->edesc = edesc;
1211 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1212 	if (ret != -EINPROGRESS &&
1213 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1214 		aead_unmap(ctx->dev, edesc, req);
1215 		qi_cache_free(edesc);
1216 	}
1217 
1218 	return ret;
1219 }
1220 
1221 static int aead_decrypt(struct aead_request *req)
1222 {
1223 	struct aead_edesc *edesc;
1224 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1225 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1226 	struct caam_request *caam_req = aead_request_ctx(req);
1227 	int ret;
1228 
1229 	/* allocate extended descriptor */
1230 	edesc = aead_edesc_alloc(req, false);
1231 	if (IS_ERR(edesc))
1232 		return PTR_ERR(edesc);
1233 
1234 	caam_req->flc = &ctx->flc[DECRYPT];
1235 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1236 	caam_req->cbk = aead_decrypt_done;
1237 	caam_req->ctx = &req->base;
1238 	caam_req->edesc = edesc;
1239 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1240 	if (ret != -EINPROGRESS &&
1241 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1242 		aead_unmap(ctx->dev, edesc, req);
1243 		qi_cache_free(edesc);
1244 	}
1245 
1246 	return ret;
1247 }
1248 
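/*
 * IPsec ESP (RFC4106/RFC4543) requests carry at least the 8-byte ESP header
 * (SPI + sequence number) as associated data; reject anything shorter.
 */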
1249 static int ipsec_gcm_encrypt(struct aead_request *req)
1250 {
1251 	if (req->assoclen < 8)
1252 		return -EINVAL;
1253 
1254 	return aead_encrypt(req);
1255 }
1256 
1257 static int ipsec_gcm_decrypt(struct aead_request *req)
1258 {
1259 	if (req->assoclen < 8)
1260 		return -EINVAL;
1261 
1262 	return aead_decrypt(req);
1263 }
1264 
1265 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1266 {
1267 	struct crypto_async_request *areq = cbk_ctx;
1268 	struct skcipher_request *req = skcipher_request_cast(areq);
1269 	struct caam_request *req_ctx = to_caam_req(areq);
1270 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1271 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1272 	struct skcipher_edesc *edesc = req_ctx->edesc;
1273 	int ecode = 0;
1274 	int ivsize = crypto_skcipher_ivsize(skcipher);
1275 
1276 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1277 
1278 	if (unlikely(status)) {
1279 		caam_qi2_strstatus(ctx->dev, status);
1280 		ecode = -EIO;
1281 	}
1282 
1283 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1284 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1285 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1286 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1287 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1288 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1289 
1290 	skcipher_unmap(ctx->dev, edesc, req);
1291 
1292 	/*
1293 	 * The crypto API expects us to set the IV (req->iv) to the last
1294 	 * ciphertext block. This is used e.g. by the CTS mode.
1295 	 */
1296 	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
1297 				 ivsize, 0);
1298 
1299 	qi_cache_free(edesc);
1300 	skcipher_request_complete(req, ecode);
1301 }
1302 
1303 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1304 {
1305 	struct crypto_async_request *areq = cbk_ctx;
1306 	struct skcipher_request *req = skcipher_request_cast(areq);
1307 	struct caam_request *req_ctx = to_caam_req(areq);
1308 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1309 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1310 	struct skcipher_edesc *edesc = req_ctx->edesc;
1311 	int ecode = 0;
1312 	int ivsize = crypto_skcipher_ivsize(skcipher);
1313 
1314 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1315 
1316 	if (unlikely(status)) {
1317 		caam_qi2_strstatus(ctx->dev, status);
1318 		ecode = -EIO;
1319 	}
1320 
1321 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1322 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1323 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1324 	caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
1325 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1326 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1327 
1328 	skcipher_unmap(ctx->dev, edesc, req);
1329 	qi_cache_free(edesc);
1330 	skcipher_request_complete(req, ecode);
1331 }
1332 
1333 static int skcipher_encrypt(struct skcipher_request *req)
1334 {
1335 	struct skcipher_edesc *edesc;
1336 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1337 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1338 	struct caam_request *caam_req = skcipher_request_ctx(req);
1339 	int ret;
1340 
1341 	/* allocate extended descriptor */
1342 	edesc = skcipher_edesc_alloc(req);
1343 	if (IS_ERR(edesc))
1344 		return PTR_ERR(edesc);
1345 
1346 	caam_req->flc = &ctx->flc[ENCRYPT];
1347 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1348 	caam_req->cbk = skcipher_encrypt_done;
1349 	caam_req->ctx = &req->base;
1350 	caam_req->edesc = edesc;
1351 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1352 	if (ret != -EINPROGRESS &&
1353 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1354 		skcipher_unmap(ctx->dev, edesc, req);
1355 		qi_cache_free(edesc);
1356 	}
1357 
1358 	return ret;
1359 }
1360 
1361 static int skcipher_decrypt(struct skcipher_request *req)
1362 {
1363 	struct skcipher_edesc *edesc;
1364 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1365 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1366 	struct caam_request *caam_req = skcipher_request_ctx(req);
1367 	int ivsize = crypto_skcipher_ivsize(skcipher);
1368 	int ret;
1369 
1370 	/* allocate extended descriptor */
1371 	edesc = skcipher_edesc_alloc(req);
1372 	if (IS_ERR(edesc))
1373 		return PTR_ERR(edesc);
1374 
1375 	/*
1376 	 * The crypto API expects us to set the IV (req->iv) to the last
1377 	 * ciphertext block.
1378 	 */
1379 	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1380 				 ivsize, 0);
1381 
1382 	caam_req->flc = &ctx->flc[DECRYPT];
1383 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1384 	caam_req->cbk = skcipher_decrypt_done;
1385 	caam_req->ctx = &req->base;
1386 	caam_req->edesc = edesc;
1387 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1388 	if (ret != -EINPROGRESS &&
1389 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1390 		skcipher_unmap(ctx->dev, edesc, req);
1391 		qi_cache_free(edesc);
1392 	}
1393 
1394 	return ret;
1395 }
1396 
1397 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1398 			 bool uses_dkp)
1399 {
1400 	dma_addr_t dma_addr;
1401 	int i;
1402 
1403 	/* copy descriptor header template value */
1404 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1405 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1406 
1407 	ctx->dev = caam->dev;
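	/*
	 * Algorithms using the Derived Key Protocol (DKP) have the CAAM write
	 * the split key back into ctx->key, so the mapping must be
	 * bidirectional; plain key copies only need DMA_TO_DEVICE.
	 */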
1408 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1409 
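	/*
	 * Map the Flow Contexts and the key buffer with a single DMA mapping:
	 * they are laid out contiguously at the start of struct caam_ctx, so
	 * offsetof(struct caam_ctx, flc_dma) covers both.
	 */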
1410 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1411 					offsetof(struct caam_ctx, flc_dma),
1412 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1413 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1414 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1415 		return -ENOMEM;
1416 	}
1417 
1418 	for (i = 0; i < NUM_OP; i++)
1419 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1420 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1421 
1422 	return 0;
1423 }
1424 
1425 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1426 {
1427 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1428 	struct caam_skcipher_alg *caam_alg =
1429 		container_of(alg, typeof(*caam_alg), skcipher);
1430 
1431 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1432 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1433 }
1434 
1435 static int caam_cra_init_aead(struct crypto_aead *tfm)
1436 {
1437 	struct aead_alg *alg = crypto_aead_alg(tfm);
1438 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1439 						      aead);
1440 
1441 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1442 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1443 			     alg->setkey == aead_setkey);
1444 }
1445 
1446 static void caam_exit_common(struct caam_ctx *ctx)
1447 {
1448 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1449 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1450 			       DMA_ATTR_SKIP_CPU_SYNC);
1451 }
1452 
1453 static void caam_cra_exit(struct crypto_skcipher *tfm)
1454 {
1455 	caam_exit_common(crypto_skcipher_ctx(tfm));
1456 }
1457 
1458 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1459 {
1460 	caam_exit_common(crypto_aead_ctx(tfm));
1461 }
1462 
1463 static struct caam_skcipher_alg driver_algs[] = {
1464 	{
1465 		.skcipher = {
1466 			.base = {
1467 				.cra_name = "cbc(aes)",
1468 				.cra_driver_name = "cbc-aes-caam-qi2",
1469 				.cra_blocksize = AES_BLOCK_SIZE,
1470 			},
1471 			.setkey = skcipher_setkey,
1472 			.encrypt = skcipher_encrypt,
1473 			.decrypt = skcipher_decrypt,
1474 			.min_keysize = AES_MIN_KEY_SIZE,
1475 			.max_keysize = AES_MAX_KEY_SIZE,
1476 			.ivsize = AES_BLOCK_SIZE,
1477 		},
1478 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1479 	},
1480 	{
1481 		.skcipher = {
1482 			.base = {
1483 				.cra_name = "cbc(des3_ede)",
1484 				.cra_driver_name = "cbc-3des-caam-qi2",
1485 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1486 			},
1487 			.setkey = skcipher_setkey,
1488 			.encrypt = skcipher_encrypt,
1489 			.decrypt = skcipher_decrypt,
1490 			.min_keysize = DES3_EDE_KEY_SIZE,
1491 			.max_keysize = DES3_EDE_KEY_SIZE,
1492 			.ivsize = DES3_EDE_BLOCK_SIZE,
1493 		},
1494 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1495 	},
1496 	{
1497 		.skcipher = {
1498 			.base = {
1499 				.cra_name = "cbc(des)",
1500 				.cra_driver_name = "cbc-des-caam-qi2",
1501 				.cra_blocksize = DES_BLOCK_SIZE,
1502 			},
1503 			.setkey = skcipher_setkey,
1504 			.encrypt = skcipher_encrypt,
1505 			.decrypt = skcipher_decrypt,
1506 			.min_keysize = DES_KEY_SIZE,
1507 			.max_keysize = DES_KEY_SIZE,
1508 			.ivsize = DES_BLOCK_SIZE,
1509 		},
1510 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1511 	},
1512 	{
1513 		.skcipher = {
1514 			.base = {
1515 				.cra_name = "ctr(aes)",
1516 				.cra_driver_name = "ctr-aes-caam-qi2",
1517 				.cra_blocksize = 1,
1518 			},
1519 			.setkey = skcipher_setkey,
1520 			.encrypt = skcipher_encrypt,
1521 			.decrypt = skcipher_decrypt,
1522 			.min_keysize = AES_MIN_KEY_SIZE,
1523 			.max_keysize = AES_MAX_KEY_SIZE,
1524 			.ivsize = AES_BLOCK_SIZE,
1525 			.chunksize = AES_BLOCK_SIZE,
1526 		},
1527 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1528 					OP_ALG_AAI_CTR_MOD128,
1529 	},
1530 	{
1531 		.skcipher = {
1532 			.base = {
1533 				.cra_name = "rfc3686(ctr(aes))",
1534 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1535 				.cra_blocksize = 1,
1536 			},
1537 			.setkey = skcipher_setkey,
1538 			.encrypt = skcipher_encrypt,
1539 			.decrypt = skcipher_decrypt,
1540 			.min_keysize = AES_MIN_KEY_SIZE +
1541 				       CTR_RFC3686_NONCE_SIZE,
1542 			.max_keysize = AES_MAX_KEY_SIZE +
1543 				       CTR_RFC3686_NONCE_SIZE,
1544 			.ivsize = CTR_RFC3686_IV_SIZE,
1545 			.chunksize = AES_BLOCK_SIZE,
1546 		},
1547 		.caam = {
1548 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1549 					   OP_ALG_AAI_CTR_MOD128,
1550 			.rfc3686 = true,
1551 		},
1552 	},
1553 	{
1554 		.skcipher = {
1555 			.base = {
1556 				.cra_name = "xts(aes)",
1557 				.cra_driver_name = "xts-aes-caam-qi2",
1558 				.cra_blocksize = AES_BLOCK_SIZE,
1559 			},
1560 			.setkey = xts_skcipher_setkey,
1561 			.encrypt = skcipher_encrypt,
1562 			.decrypt = skcipher_decrypt,
1563 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1564 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1565 			.ivsize = AES_BLOCK_SIZE,
1566 		},
1567 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1568 	},
1569 	{
1570 		.skcipher = {
1571 			.base = {
1572 				.cra_name = "chacha20",
1573 				.cra_driver_name = "chacha20-caam-qi2",
1574 				.cra_blocksize = 1,
1575 			},
1576 			.setkey = skcipher_setkey,
1577 			.encrypt = skcipher_encrypt,
1578 			.decrypt = skcipher_decrypt,
1579 			.min_keysize = CHACHA_KEY_SIZE,
1580 			.max_keysize = CHACHA_KEY_SIZE,
1581 			.ivsize = CHACHA_IV_SIZE,
1582 		},
1583 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1584 	},
1585 };
1586 
1587 static struct caam_aead_alg driver_aeads[] = {
1588 	{
1589 		.aead = {
1590 			.base = {
1591 				.cra_name = "rfc4106(gcm(aes))",
1592 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1593 				.cra_blocksize = 1,
1594 			},
1595 			.setkey = rfc4106_setkey,
1596 			.setauthsize = rfc4106_setauthsize,
1597 			.encrypt = ipsec_gcm_encrypt,
1598 			.decrypt = ipsec_gcm_decrypt,
1599 			.ivsize = 8,
1600 			.maxauthsize = AES_BLOCK_SIZE,
1601 		},
1602 		.caam = {
1603 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1604 		},
1605 	},
1606 	{
1607 		.aead = {
1608 			.base = {
1609 				.cra_name = "rfc4543(gcm(aes))",
1610 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1611 				.cra_blocksize = 1,
1612 			},
1613 			.setkey = rfc4543_setkey,
1614 			.setauthsize = rfc4543_setauthsize,
1615 			.encrypt = ipsec_gcm_encrypt,
1616 			.decrypt = ipsec_gcm_decrypt,
1617 			.ivsize = 8,
1618 			.maxauthsize = AES_BLOCK_SIZE,
1619 		},
1620 		.caam = {
1621 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1622 		},
1623 	},
1624 	/* Galois Counter Mode */
1625 	{
1626 		.aead = {
1627 			.base = {
1628 				.cra_name = "gcm(aes)",
1629 				.cra_driver_name = "gcm-aes-caam-qi2",
1630 				.cra_blocksize = 1,
1631 			},
1632 			.setkey = gcm_setkey,
1633 			.setauthsize = gcm_setauthsize,
1634 			.encrypt = aead_encrypt,
1635 			.decrypt = aead_decrypt,
1636 			.ivsize = 12,
1637 			.maxauthsize = AES_BLOCK_SIZE,
1638 		},
1639 		.caam = {
1640 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1641 		}
1642 	},
1643 	/* single-pass ipsec_esp descriptor */
1644 	{
1645 		.aead = {
1646 			.base = {
1647 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1648 				.cra_driver_name = "authenc-hmac-md5-"
1649 						   "cbc-aes-caam-qi2",
1650 				.cra_blocksize = AES_BLOCK_SIZE,
1651 			},
1652 			.setkey = aead_setkey,
1653 			.setauthsize = aead_setauthsize,
1654 			.encrypt = aead_encrypt,
1655 			.decrypt = aead_decrypt,
1656 			.ivsize = AES_BLOCK_SIZE,
1657 			.maxauthsize = MD5_DIGEST_SIZE,
1658 		},
1659 		.caam = {
1660 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1661 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1662 					   OP_ALG_AAI_HMAC_PRECOMP,
1663 		}
1664 	},
1665 	{
1666 		.aead = {
1667 			.base = {
1668 				.cra_name = "echainiv(authenc(hmac(md5),"
1669 					    "cbc(aes)))",
1670 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1671 						   "cbc-aes-caam-qi2",
1672 				.cra_blocksize = AES_BLOCK_SIZE,
1673 			},
1674 			.setkey = aead_setkey,
1675 			.setauthsize = aead_setauthsize,
1676 			.encrypt = aead_encrypt,
1677 			.decrypt = aead_decrypt,
1678 			.ivsize = AES_BLOCK_SIZE,
1679 			.maxauthsize = MD5_DIGEST_SIZE,
1680 		},
1681 		.caam = {
1682 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1683 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1684 					   OP_ALG_AAI_HMAC_PRECOMP,
1685 			.geniv = true,
1686 		}
1687 	},
1688 	{
1689 		.aead = {
1690 			.base = {
1691 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1692 				.cra_driver_name = "authenc-hmac-sha1-"
1693 						   "cbc-aes-caam-qi2",
1694 				.cra_blocksize = AES_BLOCK_SIZE,
1695 			},
1696 			.setkey = aead_setkey,
1697 			.setauthsize = aead_setauthsize,
1698 			.encrypt = aead_encrypt,
1699 			.decrypt = aead_decrypt,
1700 			.ivsize = AES_BLOCK_SIZE,
1701 			.maxauthsize = SHA1_DIGEST_SIZE,
1702 		},
1703 		.caam = {
1704 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1705 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1706 					   OP_ALG_AAI_HMAC_PRECOMP,
1707 		}
1708 	},
1709 	{
1710 		.aead = {
1711 			.base = {
1712 				.cra_name = "echainiv(authenc(hmac(sha1),"
1713 					    "cbc(aes)))",
1714 				.cra_driver_name = "echainiv-authenc-"
1715 						   "hmac-sha1-cbc-aes-caam-qi2",
1716 				.cra_blocksize = AES_BLOCK_SIZE,
1717 			},
1718 			.setkey = aead_setkey,
1719 			.setauthsize = aead_setauthsize,
1720 			.encrypt = aead_encrypt,
1721 			.decrypt = aead_decrypt,
1722 			.ivsize = AES_BLOCK_SIZE,
1723 			.maxauthsize = SHA1_DIGEST_SIZE,
1724 		},
1725 		.caam = {
1726 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1727 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1728 					   OP_ALG_AAI_HMAC_PRECOMP,
1729 			.geniv = true,
1730 		},
1731 	},
1732 	{
1733 		.aead = {
1734 			.base = {
1735 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1736 				.cra_driver_name = "authenc-hmac-sha224-"
1737 						   "cbc-aes-caam-qi2",
1738 				.cra_blocksize = AES_BLOCK_SIZE,
1739 			},
1740 			.setkey = aead_setkey,
1741 			.setauthsize = aead_setauthsize,
1742 			.encrypt = aead_encrypt,
1743 			.decrypt = aead_decrypt,
1744 			.ivsize = AES_BLOCK_SIZE,
1745 			.maxauthsize = SHA224_DIGEST_SIZE,
1746 		},
1747 		.caam = {
1748 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1749 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1750 					   OP_ALG_AAI_HMAC_PRECOMP,
1751 		}
1752 	},
1753 	{
1754 		.aead = {
1755 			.base = {
1756 				.cra_name = "echainiv(authenc(hmac(sha224),"
1757 					    "cbc(aes)))",
1758 				.cra_driver_name = "echainiv-authenc-"
1759 						   "hmac-sha224-cbc-aes-caam-qi2",
1760 				.cra_blocksize = AES_BLOCK_SIZE,
1761 			},
1762 			.setkey = aead_setkey,
1763 			.setauthsize = aead_setauthsize,
1764 			.encrypt = aead_encrypt,
1765 			.decrypt = aead_decrypt,
1766 			.ivsize = AES_BLOCK_SIZE,
1767 			.maxauthsize = SHA224_DIGEST_SIZE,
1768 		},
1769 		.caam = {
1770 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1771 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1772 					   OP_ALG_AAI_HMAC_PRECOMP,
1773 			.geniv = true,
1774 		}
1775 	},
1776 	{
1777 		.aead = {
1778 			.base = {
1779 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1780 				.cra_driver_name = "authenc-hmac-sha256-"
1781 						   "cbc-aes-caam-qi2",
1782 				.cra_blocksize = AES_BLOCK_SIZE,
1783 			},
1784 			.setkey = aead_setkey,
1785 			.setauthsize = aead_setauthsize,
1786 			.encrypt = aead_encrypt,
1787 			.decrypt = aead_decrypt,
1788 			.ivsize = AES_BLOCK_SIZE,
1789 			.maxauthsize = SHA256_DIGEST_SIZE,
1790 		},
1791 		.caam = {
1792 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1793 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1794 					   OP_ALG_AAI_HMAC_PRECOMP,
1795 		}
1796 	},
1797 	{
1798 		.aead = {
1799 			.base = {
1800 				.cra_name = "echainiv(authenc(hmac(sha256),"
1801 					    "cbc(aes)))",
1802 				.cra_driver_name = "echainiv-authenc-"
1803 						   "hmac-sha256-cbc-aes-"
1804 						   "caam-qi2",
1805 				.cra_blocksize = AES_BLOCK_SIZE,
1806 			},
1807 			.setkey = aead_setkey,
1808 			.setauthsize = aead_setauthsize,
1809 			.encrypt = aead_encrypt,
1810 			.decrypt = aead_decrypt,
1811 			.ivsize = AES_BLOCK_SIZE,
1812 			.maxauthsize = SHA256_DIGEST_SIZE,
1813 		},
1814 		.caam = {
1815 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1816 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1817 					   OP_ALG_AAI_HMAC_PRECOMP,
1818 			.geniv = true,
1819 		}
1820 	},
1821 	{
1822 		.aead = {
1823 			.base = {
1824 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1825 				.cra_driver_name = "authenc-hmac-sha384-"
1826 						   "cbc-aes-caam-qi2",
1827 				.cra_blocksize = AES_BLOCK_SIZE,
1828 			},
1829 			.setkey = aead_setkey,
1830 			.setauthsize = aead_setauthsize,
1831 			.encrypt = aead_encrypt,
1832 			.decrypt = aead_decrypt,
1833 			.ivsize = AES_BLOCK_SIZE,
1834 			.maxauthsize = SHA384_DIGEST_SIZE,
1835 		},
1836 		.caam = {
1837 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1838 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1839 					   OP_ALG_AAI_HMAC_PRECOMP,
1840 		}
1841 	},
1842 	{
1843 		.aead = {
1844 			.base = {
1845 				.cra_name = "echainiv(authenc(hmac(sha384),"
1846 					    "cbc(aes)))",
1847 				.cra_driver_name = "echainiv-authenc-"
1848 						   "hmac-sha384-cbc-aes-"
1849 						   "caam-qi2",
1850 				.cra_blocksize = AES_BLOCK_SIZE,
1851 			},
1852 			.setkey = aead_setkey,
1853 			.setauthsize = aead_setauthsize,
1854 			.encrypt = aead_encrypt,
1855 			.decrypt = aead_decrypt,
1856 			.ivsize = AES_BLOCK_SIZE,
1857 			.maxauthsize = SHA384_DIGEST_SIZE,
1858 		},
1859 		.caam = {
1860 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1861 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1862 					   OP_ALG_AAI_HMAC_PRECOMP,
1863 			.geniv = true,
1864 		}
1865 	},
1866 	{
1867 		.aead = {
1868 			.base = {
1869 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1870 				.cra_driver_name = "authenc-hmac-sha512-"
1871 						   "cbc-aes-caam-qi2",
1872 				.cra_blocksize = AES_BLOCK_SIZE,
1873 			},
1874 			.setkey = aead_setkey,
1875 			.setauthsize = aead_setauthsize,
1876 			.encrypt = aead_encrypt,
1877 			.decrypt = aead_decrypt,
1878 			.ivsize = AES_BLOCK_SIZE,
1879 			.maxauthsize = SHA512_DIGEST_SIZE,
1880 		},
1881 		.caam = {
1882 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1883 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1884 					   OP_ALG_AAI_HMAC_PRECOMP,
1885 		}
1886 	},
1887 	{
1888 		.aead = {
1889 			.base = {
1890 				.cra_name = "echainiv(authenc(hmac(sha512),"
1891 					    "cbc(aes)))",
1892 				.cra_driver_name = "echainiv-authenc-"
1893 						   "hmac-sha512-cbc-aes-"
1894 						   "caam-qi2",
1895 				.cra_blocksize = AES_BLOCK_SIZE,
1896 			},
1897 			.setkey = aead_setkey,
1898 			.setauthsize = aead_setauthsize,
1899 			.encrypt = aead_encrypt,
1900 			.decrypt = aead_decrypt,
1901 			.ivsize = AES_BLOCK_SIZE,
1902 			.maxauthsize = SHA512_DIGEST_SIZE,
1903 		},
1904 		.caam = {
1905 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1906 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1907 					   OP_ALG_AAI_HMAC_PRECOMP,
1908 			.geniv = true,
1909 		}
1910 	},
1911 	{
1912 		.aead = {
1913 			.base = {
1914 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1915 				.cra_driver_name = "authenc-hmac-md5-"
1916 						   "cbc-des3_ede-caam-qi2",
1917 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1918 			},
1919 			.setkey = aead_setkey,
1920 			.setauthsize = aead_setauthsize,
1921 			.encrypt = aead_encrypt,
1922 			.decrypt = aead_decrypt,
1923 			.ivsize = DES3_EDE_BLOCK_SIZE,
1924 			.maxauthsize = MD5_DIGEST_SIZE,
1925 		},
1926 		.caam = {
1927 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1928 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1929 					   OP_ALG_AAI_HMAC_PRECOMP,
1930 		}
1931 	},
1932 	{
1933 		.aead = {
1934 			.base = {
1935 				.cra_name = "echainiv(authenc(hmac(md5),"
1936 					    "cbc(des3_ede)))",
1937 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1938 						   "cbc-des3_ede-caam-qi2",
1939 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1940 			},
1941 			.setkey = aead_setkey,
1942 			.setauthsize = aead_setauthsize,
1943 			.encrypt = aead_encrypt,
1944 			.decrypt = aead_decrypt,
1945 			.ivsize = DES3_EDE_BLOCK_SIZE,
1946 			.maxauthsize = MD5_DIGEST_SIZE,
1947 		},
1948 		.caam = {
1949 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1950 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1951 					   OP_ALG_AAI_HMAC_PRECOMP,
1952 			.geniv = true,
1953 		}
1954 	},
1955 	{
1956 		.aead = {
1957 			.base = {
1958 				.cra_name = "authenc(hmac(sha1),"
1959 					    "cbc(des3_ede))",
1960 				.cra_driver_name = "authenc-hmac-sha1-"
1961 						   "cbc-des3_ede-caam-qi2",
1962 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1963 			},
1964 			.setkey = aead_setkey,
1965 			.setauthsize = aead_setauthsize,
1966 			.encrypt = aead_encrypt,
1967 			.decrypt = aead_decrypt,
1968 			.ivsize = DES3_EDE_BLOCK_SIZE,
1969 			.maxauthsize = SHA1_DIGEST_SIZE,
1970 		},
1971 		.caam = {
1972 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1973 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1974 					   OP_ALG_AAI_HMAC_PRECOMP,
1975 		},
1976 	},
1977 	{
1978 		.aead = {
1979 			.base = {
1980 				.cra_name = "echainiv(authenc(hmac(sha1),"
1981 					    "cbc(des3_ede)))",
1982 				.cra_driver_name = "echainiv-authenc-"
1983 						   "hmac-sha1-"
1984 						   "cbc-des3_ede-caam-qi2",
1985 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1986 			},
1987 			.setkey = aead_setkey,
1988 			.setauthsize = aead_setauthsize,
1989 			.encrypt = aead_encrypt,
1990 			.decrypt = aead_decrypt,
1991 			.ivsize = DES3_EDE_BLOCK_SIZE,
1992 			.maxauthsize = SHA1_DIGEST_SIZE,
1993 		},
1994 		.caam = {
1995 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1996 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1997 					   OP_ALG_AAI_HMAC_PRECOMP,
1998 			.geniv = true,
1999 		}
2000 	},
2001 	{
2002 		.aead = {
2003 			.base = {
2004 				.cra_name = "authenc(hmac(sha224),"
2005 					    "cbc(des3_ede))",
2006 				.cra_driver_name = "authenc-hmac-sha224-"
2007 						   "cbc-des3_ede-caam-qi2",
2008 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2009 			},
2010 			.setkey = aead_setkey,
2011 			.setauthsize = aead_setauthsize,
2012 			.encrypt = aead_encrypt,
2013 			.decrypt = aead_decrypt,
2014 			.ivsize = DES3_EDE_BLOCK_SIZE,
2015 			.maxauthsize = SHA224_DIGEST_SIZE,
2016 		},
2017 		.caam = {
2018 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2019 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2020 					   OP_ALG_AAI_HMAC_PRECOMP,
2021 		},
2022 	},
2023 	{
2024 		.aead = {
2025 			.base = {
2026 				.cra_name = "echainiv(authenc(hmac(sha224),"
2027 					    "cbc(des3_ede)))",
2028 				.cra_driver_name = "echainiv-authenc-"
2029 						   "hmac-sha224-"
2030 						   "cbc-des3_ede-caam-qi2",
2031 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2032 			},
2033 			.setkey = aead_setkey,
2034 			.setauthsize = aead_setauthsize,
2035 			.encrypt = aead_encrypt,
2036 			.decrypt = aead_decrypt,
2037 			.ivsize = DES3_EDE_BLOCK_SIZE,
2038 			.maxauthsize = SHA224_DIGEST_SIZE,
2039 		},
2040 		.caam = {
2041 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2042 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2043 					   OP_ALG_AAI_HMAC_PRECOMP,
2044 			.geniv = true,
2045 		}
2046 	},
2047 	{
2048 		.aead = {
2049 			.base = {
2050 				.cra_name = "authenc(hmac(sha256),"
2051 					    "cbc(des3_ede))",
2052 				.cra_driver_name = "authenc-hmac-sha256-"
2053 						   "cbc-des3_ede-caam-qi2",
2054 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2055 			},
2056 			.setkey = aead_setkey,
2057 			.setauthsize = aead_setauthsize,
2058 			.encrypt = aead_encrypt,
2059 			.decrypt = aead_decrypt,
2060 			.ivsize = DES3_EDE_BLOCK_SIZE,
2061 			.maxauthsize = SHA256_DIGEST_SIZE,
2062 		},
2063 		.caam = {
2064 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2065 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2066 					   OP_ALG_AAI_HMAC_PRECOMP,
2067 		},
2068 	},
2069 	{
2070 		.aead = {
2071 			.base = {
2072 				.cra_name = "echainiv(authenc(hmac(sha256),"
2073 					    "cbc(des3_ede)))",
2074 				.cra_driver_name = "echainiv-authenc-"
2075 						   "hmac-sha256-"
2076 						   "cbc-des3_ede-caam-qi2",
2077 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2078 			},
2079 			.setkey = aead_setkey,
2080 			.setauthsize = aead_setauthsize,
2081 			.encrypt = aead_encrypt,
2082 			.decrypt = aead_decrypt,
2083 			.ivsize = DES3_EDE_BLOCK_SIZE,
2084 			.maxauthsize = SHA256_DIGEST_SIZE,
2085 		},
2086 		.caam = {
2087 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2088 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2089 					   OP_ALG_AAI_HMAC_PRECOMP,
2090 			.geniv = true,
2091 		}
2092 	},
2093 	{
2094 		.aead = {
2095 			.base = {
2096 				.cra_name = "authenc(hmac(sha384),"
2097 					    "cbc(des3_ede))",
2098 				.cra_driver_name = "authenc-hmac-sha384-"
2099 						   "cbc-des3_ede-caam-qi2",
2100 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2101 			},
2102 			.setkey = aead_setkey,
2103 			.setauthsize = aead_setauthsize,
2104 			.encrypt = aead_encrypt,
2105 			.decrypt = aead_decrypt,
2106 			.ivsize = DES3_EDE_BLOCK_SIZE,
2107 			.maxauthsize = SHA384_DIGEST_SIZE,
2108 		},
2109 		.caam = {
2110 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2111 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2112 					   OP_ALG_AAI_HMAC_PRECOMP,
2113 		},
2114 	},
2115 	{
2116 		.aead = {
2117 			.base = {
2118 				.cra_name = "echainiv(authenc(hmac(sha384),"
2119 					    "cbc(des3_ede)))",
2120 				.cra_driver_name = "echainiv-authenc-"
2121 						   "hmac-sha384-"
2122 						   "cbc-des3_ede-caam-qi2",
2123 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2124 			},
2125 			.setkey = aead_setkey,
2126 			.setauthsize = aead_setauthsize,
2127 			.encrypt = aead_encrypt,
2128 			.decrypt = aead_decrypt,
2129 			.ivsize = DES3_EDE_BLOCK_SIZE,
2130 			.maxauthsize = SHA384_DIGEST_SIZE,
2131 		},
2132 		.caam = {
2133 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2134 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2135 					   OP_ALG_AAI_HMAC_PRECOMP,
2136 			.geniv = true,
2137 		}
2138 	},
2139 	{
2140 		.aead = {
2141 			.base = {
2142 				.cra_name = "authenc(hmac(sha512),"
2143 					    "cbc(des3_ede))",
2144 				.cra_driver_name = "authenc-hmac-sha512-"
2145 						   "cbc-des3_ede-caam-qi2",
2146 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2147 			},
2148 			.setkey = aead_setkey,
2149 			.setauthsize = aead_setauthsize,
2150 			.encrypt = aead_encrypt,
2151 			.decrypt = aead_decrypt,
2152 			.ivsize = DES3_EDE_BLOCK_SIZE,
2153 			.maxauthsize = SHA512_DIGEST_SIZE,
2154 		},
2155 		.caam = {
2156 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2157 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2158 					   OP_ALG_AAI_HMAC_PRECOMP,
2159 		},
2160 	},
2161 	{
2162 		.aead = {
2163 			.base = {
2164 				.cra_name = "echainiv(authenc(hmac(sha512),"
2165 					    "cbc(des3_ede)))",
2166 				.cra_driver_name = "echainiv-authenc-"
2167 						   "hmac-sha512-"
2168 						   "cbc-des3_ede-caam-qi2",
2169 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2170 			},
2171 			.setkey = aead_setkey,
2172 			.setauthsize = aead_setauthsize,
2173 			.encrypt = aead_encrypt,
2174 			.decrypt = aead_decrypt,
2175 			.ivsize = DES3_EDE_BLOCK_SIZE,
2176 			.maxauthsize = SHA512_DIGEST_SIZE,
2177 		},
2178 		.caam = {
2179 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2180 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2181 					   OP_ALG_AAI_HMAC_PRECOMP,
2182 			.geniv = true,
2183 		}
2184 	},
2185 	{
2186 		.aead = {
2187 			.base = {
2188 				.cra_name = "authenc(hmac(md5),cbc(des))",
2189 				.cra_driver_name = "authenc-hmac-md5-"
2190 						   "cbc-des-caam-qi2",
2191 				.cra_blocksize = DES_BLOCK_SIZE,
2192 			},
2193 			.setkey = aead_setkey,
2194 			.setauthsize = aead_setauthsize,
2195 			.encrypt = aead_encrypt,
2196 			.decrypt = aead_decrypt,
2197 			.ivsize = DES_BLOCK_SIZE,
2198 			.maxauthsize = MD5_DIGEST_SIZE,
2199 		},
2200 		.caam = {
2201 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2202 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2203 					   OP_ALG_AAI_HMAC_PRECOMP,
2204 		},
2205 	},
2206 	{
2207 		.aead = {
2208 			.base = {
2209 				.cra_name = "echainiv(authenc(hmac(md5),"
2210 					    "cbc(des)))",
2211 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2212 						   "cbc-des-caam-qi2",
2213 				.cra_blocksize = DES_BLOCK_SIZE,
2214 			},
2215 			.setkey = aead_setkey,
2216 			.setauthsize = aead_setauthsize,
2217 			.encrypt = aead_encrypt,
2218 			.decrypt = aead_decrypt,
2219 			.ivsize = DES_BLOCK_SIZE,
2220 			.maxauthsize = MD5_DIGEST_SIZE,
2221 		},
2222 		.caam = {
2223 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2224 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2225 					   OP_ALG_AAI_HMAC_PRECOMP,
2226 			.geniv = true,
2227 		}
2228 	},
2229 	{
2230 		.aead = {
2231 			.base = {
2232 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2233 				.cra_driver_name = "authenc-hmac-sha1-"
2234 						   "cbc-des-caam-qi2",
2235 				.cra_blocksize = DES_BLOCK_SIZE,
2236 			},
2237 			.setkey = aead_setkey,
2238 			.setauthsize = aead_setauthsize,
2239 			.encrypt = aead_encrypt,
2240 			.decrypt = aead_decrypt,
2241 			.ivsize = DES_BLOCK_SIZE,
2242 			.maxauthsize = SHA1_DIGEST_SIZE,
2243 		},
2244 		.caam = {
2245 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2246 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2247 					   OP_ALG_AAI_HMAC_PRECOMP,
2248 		},
2249 	},
2250 	{
2251 		.aead = {
2252 			.base = {
2253 				.cra_name = "echainiv(authenc(hmac(sha1),"
2254 					    "cbc(des)))",
2255 				.cra_driver_name = "echainiv-authenc-"
2256 						   "hmac-sha1-cbc-des-caam-qi2",
2257 				.cra_blocksize = DES_BLOCK_SIZE,
2258 			},
2259 			.setkey = aead_setkey,
2260 			.setauthsize = aead_setauthsize,
2261 			.encrypt = aead_encrypt,
2262 			.decrypt = aead_decrypt,
2263 			.ivsize = DES_BLOCK_SIZE,
2264 			.maxauthsize = SHA1_DIGEST_SIZE,
2265 		},
2266 		.caam = {
2267 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2268 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2269 					   OP_ALG_AAI_HMAC_PRECOMP,
2270 			.geniv = true,
2271 		}
2272 	},
2273 	{
2274 		.aead = {
2275 			.base = {
2276 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2277 				.cra_driver_name = "authenc-hmac-sha224-"
2278 						   "cbc-des-caam-qi2",
2279 				.cra_blocksize = DES_BLOCK_SIZE,
2280 			},
2281 			.setkey = aead_setkey,
2282 			.setauthsize = aead_setauthsize,
2283 			.encrypt = aead_encrypt,
2284 			.decrypt = aead_decrypt,
2285 			.ivsize = DES_BLOCK_SIZE,
2286 			.maxauthsize = SHA224_DIGEST_SIZE,
2287 		},
2288 		.caam = {
2289 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2290 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2291 					   OP_ALG_AAI_HMAC_PRECOMP,
2292 		},
2293 	},
2294 	{
2295 		.aead = {
2296 			.base = {
2297 				.cra_name = "echainiv(authenc(hmac(sha224),"
2298 					    "cbc(des)))",
2299 				.cra_driver_name = "echainiv-authenc-"
2300 						   "hmac-sha224-cbc-des-"
2301 						   "caam-qi2",
2302 				.cra_blocksize = DES_BLOCK_SIZE,
2303 			},
2304 			.setkey = aead_setkey,
2305 			.setauthsize = aead_setauthsize,
2306 			.encrypt = aead_encrypt,
2307 			.decrypt = aead_decrypt,
2308 			.ivsize = DES_BLOCK_SIZE,
2309 			.maxauthsize = SHA224_DIGEST_SIZE,
2310 		},
2311 		.caam = {
2312 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2313 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2314 					   OP_ALG_AAI_HMAC_PRECOMP,
2315 			.geniv = true,
2316 		}
2317 	},
2318 	{
2319 		.aead = {
2320 			.base = {
2321 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2322 				.cra_driver_name = "authenc-hmac-sha256-"
2323 						   "cbc-des-caam-qi2",
2324 				.cra_blocksize = DES_BLOCK_SIZE,
2325 			},
2326 			.setkey = aead_setkey,
2327 			.setauthsize = aead_setauthsize,
2328 			.encrypt = aead_encrypt,
2329 			.decrypt = aead_decrypt,
2330 			.ivsize = DES_BLOCK_SIZE,
2331 			.maxauthsize = SHA256_DIGEST_SIZE,
2332 		},
2333 		.caam = {
2334 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2335 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2336 					   OP_ALG_AAI_HMAC_PRECOMP,
2337 		},
2338 	},
2339 	{
2340 		.aead = {
2341 			.base = {
2342 				.cra_name = "echainiv(authenc(hmac(sha256),"
2343 					    "cbc(des)))",
2344 				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
2346 						   "caam-qi2",
2347 				.cra_blocksize = DES_BLOCK_SIZE,
2348 			},
2349 			.setkey = aead_setkey,
2350 			.setauthsize = aead_setauthsize,
2351 			.encrypt = aead_encrypt,
2352 			.decrypt = aead_decrypt,
2353 			.ivsize = DES_BLOCK_SIZE,
2354 			.maxauthsize = SHA256_DIGEST_SIZE,
2355 		},
2356 		.caam = {
2357 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2358 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2359 					   OP_ALG_AAI_HMAC_PRECOMP,
2360 			.geniv = true,
2361 		},
2362 	},
2363 	{
2364 		.aead = {
2365 			.base = {
2366 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2367 				.cra_driver_name = "authenc-hmac-sha384-"
2368 						   "cbc-des-caam-qi2",
2369 				.cra_blocksize = DES_BLOCK_SIZE,
2370 			},
2371 			.setkey = aead_setkey,
2372 			.setauthsize = aead_setauthsize,
2373 			.encrypt = aead_encrypt,
2374 			.decrypt = aead_decrypt,
2375 			.ivsize = DES_BLOCK_SIZE,
2376 			.maxauthsize = SHA384_DIGEST_SIZE,
2377 		},
2378 		.caam = {
2379 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2380 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2381 					   OP_ALG_AAI_HMAC_PRECOMP,
2382 		},
2383 	},
2384 	{
2385 		.aead = {
2386 			.base = {
2387 				.cra_name = "echainiv(authenc(hmac(sha384),"
2388 					    "cbc(des)))",
2389 				.cra_driver_name = "echainiv-authenc-"
2390 						   "hmac-sha384-cbc-des-"
2391 						   "caam-qi2",
2392 				.cra_blocksize = DES_BLOCK_SIZE,
2393 			},
2394 			.setkey = aead_setkey,
2395 			.setauthsize = aead_setauthsize,
2396 			.encrypt = aead_encrypt,
2397 			.decrypt = aead_decrypt,
2398 			.ivsize = DES_BLOCK_SIZE,
2399 			.maxauthsize = SHA384_DIGEST_SIZE,
2400 		},
2401 		.caam = {
2402 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2403 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2404 					   OP_ALG_AAI_HMAC_PRECOMP,
2405 			.geniv = true,
2406 		}
2407 	},
2408 	{
2409 		.aead = {
2410 			.base = {
2411 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2412 				.cra_driver_name = "authenc-hmac-sha512-"
2413 						   "cbc-des-caam-qi2",
2414 				.cra_blocksize = DES_BLOCK_SIZE,
2415 			},
2416 			.setkey = aead_setkey,
2417 			.setauthsize = aead_setauthsize,
2418 			.encrypt = aead_encrypt,
2419 			.decrypt = aead_decrypt,
2420 			.ivsize = DES_BLOCK_SIZE,
2421 			.maxauthsize = SHA512_DIGEST_SIZE,
2422 		},
2423 		.caam = {
2424 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2425 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2426 					   OP_ALG_AAI_HMAC_PRECOMP,
2427 		}
2428 	},
2429 	{
2430 		.aead = {
2431 			.base = {
2432 				.cra_name = "echainiv(authenc(hmac(sha512),"
2433 					    "cbc(des)))",
2434 				.cra_driver_name = "echainiv-authenc-"
2435 						   "hmac-sha512-cbc-des-"
2436 						   "caam-qi2",
2437 				.cra_blocksize = DES_BLOCK_SIZE,
2438 			},
2439 			.setkey = aead_setkey,
2440 			.setauthsize = aead_setauthsize,
2441 			.encrypt = aead_encrypt,
2442 			.decrypt = aead_decrypt,
2443 			.ivsize = DES_BLOCK_SIZE,
2444 			.maxauthsize = SHA512_DIGEST_SIZE,
2445 		},
2446 		.caam = {
2447 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2448 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2449 					   OP_ALG_AAI_HMAC_PRECOMP,
2450 			.geniv = true,
2451 		}
2452 	},
2453 	{
2454 		.aead = {
2455 			.base = {
2456 				.cra_name = "authenc(hmac(md5),"
2457 					    "rfc3686(ctr(aes)))",
2458 				.cra_driver_name = "authenc-hmac-md5-"
2459 						   "rfc3686-ctr-aes-caam-qi2",
2460 				.cra_blocksize = 1,
2461 			},
2462 			.setkey = aead_setkey,
2463 			.setauthsize = aead_setauthsize,
2464 			.encrypt = aead_encrypt,
2465 			.decrypt = aead_decrypt,
2466 			.ivsize = CTR_RFC3686_IV_SIZE,
2467 			.maxauthsize = MD5_DIGEST_SIZE,
2468 		},
2469 		.caam = {
2470 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2471 					   OP_ALG_AAI_CTR_MOD128,
2472 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2473 					   OP_ALG_AAI_HMAC_PRECOMP,
2474 			.rfc3686 = true,
2475 		},
2476 	},
2477 	{
2478 		.aead = {
2479 			.base = {
2480 				.cra_name = "seqiv(authenc("
2481 					    "hmac(md5),rfc3686(ctr(aes))))",
2482 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2483 						   "rfc3686-ctr-aes-caam-qi2",
2484 				.cra_blocksize = 1,
2485 			},
2486 			.setkey = aead_setkey,
2487 			.setauthsize = aead_setauthsize,
2488 			.encrypt = aead_encrypt,
2489 			.decrypt = aead_decrypt,
2490 			.ivsize = CTR_RFC3686_IV_SIZE,
2491 			.maxauthsize = MD5_DIGEST_SIZE,
2492 		},
2493 		.caam = {
2494 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2495 					   OP_ALG_AAI_CTR_MOD128,
2496 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2497 					   OP_ALG_AAI_HMAC_PRECOMP,
2498 			.rfc3686 = true,
2499 			.geniv = true,
2500 		},
2501 	},
2502 	{
2503 		.aead = {
2504 			.base = {
2505 				.cra_name = "authenc(hmac(sha1),"
2506 					    "rfc3686(ctr(aes)))",
2507 				.cra_driver_name = "authenc-hmac-sha1-"
2508 						   "rfc3686-ctr-aes-caam-qi2",
2509 				.cra_blocksize = 1,
2510 			},
2511 			.setkey = aead_setkey,
2512 			.setauthsize = aead_setauthsize,
2513 			.encrypt = aead_encrypt,
2514 			.decrypt = aead_decrypt,
2515 			.ivsize = CTR_RFC3686_IV_SIZE,
2516 			.maxauthsize = SHA1_DIGEST_SIZE,
2517 		},
2518 		.caam = {
2519 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2520 					   OP_ALG_AAI_CTR_MOD128,
2521 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2522 					   OP_ALG_AAI_HMAC_PRECOMP,
2523 			.rfc3686 = true,
2524 		},
2525 	},
2526 	{
2527 		.aead = {
2528 			.base = {
2529 				.cra_name = "seqiv(authenc("
2530 					    "hmac(sha1),rfc3686(ctr(aes))))",
2531 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2532 						   "rfc3686-ctr-aes-caam-qi2",
2533 				.cra_blocksize = 1,
2534 			},
2535 			.setkey = aead_setkey,
2536 			.setauthsize = aead_setauthsize,
2537 			.encrypt = aead_encrypt,
2538 			.decrypt = aead_decrypt,
2539 			.ivsize = CTR_RFC3686_IV_SIZE,
2540 			.maxauthsize = SHA1_DIGEST_SIZE,
2541 		},
2542 		.caam = {
2543 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2544 					   OP_ALG_AAI_CTR_MOD128,
2545 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2546 					   OP_ALG_AAI_HMAC_PRECOMP,
2547 			.rfc3686 = true,
2548 			.geniv = true,
2549 		},
2550 	},
2551 	{
2552 		.aead = {
2553 			.base = {
2554 				.cra_name = "authenc(hmac(sha224),"
2555 					    "rfc3686(ctr(aes)))",
2556 				.cra_driver_name = "authenc-hmac-sha224-"
2557 						   "rfc3686-ctr-aes-caam-qi2",
2558 				.cra_blocksize = 1,
2559 			},
2560 			.setkey = aead_setkey,
2561 			.setauthsize = aead_setauthsize,
2562 			.encrypt = aead_encrypt,
2563 			.decrypt = aead_decrypt,
2564 			.ivsize = CTR_RFC3686_IV_SIZE,
2565 			.maxauthsize = SHA224_DIGEST_SIZE,
2566 		},
2567 		.caam = {
2568 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2569 					   OP_ALG_AAI_CTR_MOD128,
2570 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2571 					   OP_ALG_AAI_HMAC_PRECOMP,
2572 			.rfc3686 = true,
2573 		},
2574 	},
2575 	{
2576 		.aead = {
2577 			.base = {
2578 				.cra_name = "seqiv(authenc("
2579 					    "hmac(sha224),rfc3686(ctr(aes))))",
2580 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2581 						   "rfc3686-ctr-aes-caam-qi2",
2582 				.cra_blocksize = 1,
2583 			},
2584 			.setkey = aead_setkey,
2585 			.setauthsize = aead_setauthsize,
2586 			.encrypt = aead_encrypt,
2587 			.decrypt = aead_decrypt,
2588 			.ivsize = CTR_RFC3686_IV_SIZE,
2589 			.maxauthsize = SHA224_DIGEST_SIZE,
2590 		},
2591 		.caam = {
2592 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2593 					   OP_ALG_AAI_CTR_MOD128,
2594 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2595 					   OP_ALG_AAI_HMAC_PRECOMP,
2596 			.rfc3686 = true,
2597 			.geniv = true,
2598 		},
2599 	},
2600 	{
2601 		.aead = {
2602 			.base = {
2603 				.cra_name = "authenc(hmac(sha256),"
2604 					    "rfc3686(ctr(aes)))",
2605 				.cra_driver_name = "authenc-hmac-sha256-"
2606 						   "rfc3686-ctr-aes-caam-qi2",
2607 				.cra_blocksize = 1,
2608 			},
2609 			.setkey = aead_setkey,
2610 			.setauthsize = aead_setauthsize,
2611 			.encrypt = aead_encrypt,
2612 			.decrypt = aead_decrypt,
2613 			.ivsize = CTR_RFC3686_IV_SIZE,
2614 			.maxauthsize = SHA256_DIGEST_SIZE,
2615 		},
2616 		.caam = {
2617 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2618 					   OP_ALG_AAI_CTR_MOD128,
2619 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2620 					   OP_ALG_AAI_HMAC_PRECOMP,
2621 			.rfc3686 = true,
2622 		},
2623 	},
2624 	{
2625 		.aead = {
2626 			.base = {
2627 				.cra_name = "seqiv(authenc(hmac(sha256),"
2628 					    "rfc3686(ctr(aes))))",
2629 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2630 						   "rfc3686-ctr-aes-caam-qi2",
2631 				.cra_blocksize = 1,
2632 			},
2633 			.setkey = aead_setkey,
2634 			.setauthsize = aead_setauthsize,
2635 			.encrypt = aead_encrypt,
2636 			.decrypt = aead_decrypt,
2637 			.ivsize = CTR_RFC3686_IV_SIZE,
2638 			.maxauthsize = SHA256_DIGEST_SIZE,
2639 		},
2640 		.caam = {
2641 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2642 					   OP_ALG_AAI_CTR_MOD128,
2643 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2644 					   OP_ALG_AAI_HMAC_PRECOMP,
2645 			.rfc3686 = true,
2646 			.geniv = true,
2647 		},
2648 	},
2649 	{
2650 		.aead = {
2651 			.base = {
2652 				.cra_name = "authenc(hmac(sha384),"
2653 					    "rfc3686(ctr(aes)))",
2654 				.cra_driver_name = "authenc-hmac-sha384-"
2655 						   "rfc3686-ctr-aes-caam-qi2",
2656 				.cra_blocksize = 1,
2657 			},
2658 			.setkey = aead_setkey,
2659 			.setauthsize = aead_setauthsize,
2660 			.encrypt = aead_encrypt,
2661 			.decrypt = aead_decrypt,
2662 			.ivsize = CTR_RFC3686_IV_SIZE,
2663 			.maxauthsize = SHA384_DIGEST_SIZE,
2664 		},
2665 		.caam = {
2666 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2667 					   OP_ALG_AAI_CTR_MOD128,
2668 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2669 					   OP_ALG_AAI_HMAC_PRECOMP,
2670 			.rfc3686 = true,
2671 		},
2672 	},
2673 	{
2674 		.aead = {
2675 			.base = {
2676 				.cra_name = "seqiv(authenc(hmac(sha384),"
2677 					    "rfc3686(ctr(aes))))",
2678 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2679 						   "rfc3686-ctr-aes-caam-qi2",
2680 				.cra_blocksize = 1,
2681 			},
2682 			.setkey = aead_setkey,
2683 			.setauthsize = aead_setauthsize,
2684 			.encrypt = aead_encrypt,
2685 			.decrypt = aead_decrypt,
2686 			.ivsize = CTR_RFC3686_IV_SIZE,
2687 			.maxauthsize = SHA384_DIGEST_SIZE,
2688 		},
2689 		.caam = {
2690 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2691 					   OP_ALG_AAI_CTR_MOD128,
2692 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2693 					   OP_ALG_AAI_HMAC_PRECOMP,
2694 			.rfc3686 = true,
2695 			.geniv = true,
2696 		},
2697 	},
2698 	{
2699 		.aead = {
2700 			.base = {
2701 				.cra_name = "rfc7539(chacha20,poly1305)",
2702 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2703 						   "caam-qi2",
2704 				.cra_blocksize = 1,
2705 			},
2706 			.setkey = chachapoly_setkey,
2707 			.setauthsize = chachapoly_setauthsize,
2708 			.encrypt = aead_encrypt,
2709 			.decrypt = aead_decrypt,
2710 			.ivsize = CHACHAPOLY_IV_SIZE,
2711 			.maxauthsize = POLY1305_DIGEST_SIZE,
2712 		},
2713 		.caam = {
2714 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2715 					   OP_ALG_AAI_AEAD,
2716 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2717 					   OP_ALG_AAI_AEAD,
2718 		},
2719 	},
2720 	{
2721 		.aead = {
2722 			.base = {
2723 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2724 				.cra_driver_name = "rfc7539esp-chacha20-"
2725 						   "poly1305-caam-qi2",
2726 				.cra_blocksize = 1,
2727 			},
2728 			.setkey = chachapoly_setkey,
2729 			.setauthsize = chachapoly_setauthsize,
2730 			.encrypt = aead_encrypt,
2731 			.decrypt = aead_decrypt,
2732 			.ivsize = 8,
2733 			.maxauthsize = POLY1305_DIGEST_SIZE,
2734 		},
2735 		.caam = {
2736 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2737 					   OP_ALG_AAI_AEAD,
2738 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2739 					   OP_ALG_AAI_AEAD,
2740 		},
2741 	},
2742 	{
2743 		.aead = {
2744 			.base = {
2745 				.cra_name = "authenc(hmac(sha512),"
2746 					    "rfc3686(ctr(aes)))",
2747 				.cra_driver_name = "authenc-hmac-sha512-"
2748 						   "rfc3686-ctr-aes-caam-qi2",
2749 				.cra_blocksize = 1,
2750 			},
2751 			.setkey = aead_setkey,
2752 			.setauthsize = aead_setauthsize,
2753 			.encrypt = aead_encrypt,
2754 			.decrypt = aead_decrypt,
2755 			.ivsize = CTR_RFC3686_IV_SIZE,
2756 			.maxauthsize = SHA512_DIGEST_SIZE,
2757 		},
2758 		.caam = {
2759 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2760 					   OP_ALG_AAI_CTR_MOD128,
2761 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2762 					   OP_ALG_AAI_HMAC_PRECOMP,
2763 			.rfc3686 = true,
2764 		},
2765 	},
2766 	{
2767 		.aead = {
2768 			.base = {
2769 				.cra_name = "seqiv(authenc(hmac(sha512),"
2770 					    "rfc3686(ctr(aes))))",
2771 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2772 						   "rfc3686-ctr-aes-caam-qi2",
2773 				.cra_blocksize = 1,
2774 			},
2775 			.setkey = aead_setkey,
2776 			.setauthsize = aead_setauthsize,
2777 			.encrypt = aead_encrypt,
2778 			.decrypt = aead_decrypt,
2779 			.ivsize = CTR_RFC3686_IV_SIZE,
2780 			.maxauthsize = SHA512_DIGEST_SIZE,
2781 		},
2782 		.caam = {
2783 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2784 					   OP_ALG_AAI_CTR_MOD128,
2785 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2786 					   OP_ALG_AAI_HMAC_PRECOMP,
2787 			.rfc3686 = true,
2788 			.geniv = true,
2789 		},
2790 	},
2791 };
2792 
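/* Fill in crypto API template fields common to all skcipher algorithms */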
2793 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2794 {
2795 	struct skcipher_alg *alg = &t_alg->skcipher;
2796 
2797 	alg->base.cra_module = THIS_MODULE;
2798 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2799 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2800 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2801 
2802 	alg->init = caam_cra_init_skcipher;
2803 	alg->exit = caam_cra_exit;
2804 }
2805 
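/* Fill in crypto API template fields common to all AEAD algorithms */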
2806 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2807 {
2808 	struct aead_alg *alg = &t_alg->aead;
2809 
2810 	alg->base.cra_module = THIS_MODULE;
2811 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2812 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2813 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2814 
2815 	alg->init = caam_cra_init_aead;
2816 	alg->exit = caam_cra_exit_aead;
2817 }
2818 
2819 /* max hash key is max split key size */
2820 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2821 
2822 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2823 
2824 /* caam context sizes for hashes: running digest + 8 */
2825 #define HASH_MSG_LEN			8
2826 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2827 
2828 enum hash_optype {
2829 	UPDATE = 0,
2830 	UPDATE_FIRST,
2831 	FINALIZE,
2832 	DIGEST,
2833 	HASH_NUM_OP
2834 };
2835 
2836 /**
2837  * caam_hash_ctx - ahash per-session context
2838  * @flc: Flow Contexts array
2839  * @flc_dma: I/O virtual addresses of the Flow Contexts
2840  * @dev: dpseci device
2841  * @ctx_len: size of Context Register
2842  * @adata: hashing algorithm details
2843  */
2844 struct caam_hash_ctx {
2845 	struct caam_flc flc[HASH_NUM_OP];
2846 	dma_addr_t flc_dma[HASH_NUM_OP];
2847 	struct device *dev;
2848 	int ctx_len;
2849 	struct alginfo adata;
2850 };
2851 
/**
 * caam_hash_state - ahash per-request state
 * @caam_req: request structure handed to the DPSECI backend
 * @buf_dma: I/O virtual address of the currently active data buffer
 * @ctx_dma: I/O virtual address of the running digest (caam_ctx)
 * @buf_0: first buffer used to accumulate data not yet hashed
 * @buflen_0: number of bytes currently held in @buf_0
 * @buf_1: second buffer used to accumulate data not yet hashed
 * @buflen_1: number of bytes currently held in @buf_1
 * @caam_ctx: running digest (Context Register contents)
 * @update: pointer to the update implementation currently in use
 * @final: pointer to the final implementation currently in use
 * @finup: pointer to the finup implementation currently in use
 * @current_buf: index (0 or 1) of the buffer currently accumulating data
 */
2853 struct caam_hash_state {
2854 	struct caam_request caam_req;
2855 	dma_addr_t buf_dma;
2856 	dma_addr_t ctx_dma;
2857 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2858 	int buflen_0;
2859 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2860 	int buflen_1;
2861 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2862 	int (*update)(struct ahash_request *req);
2863 	int (*final)(struct ahash_request *req);
2864 	int (*finup)(struct ahash_request *req);
2865 	int current_buf;
2866 };
2867 
2868 struct caam_export_state {
2869 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2870 	u8 caam_ctx[MAX_CTX_LEN];
2871 	int buflen;
2872 	int (*update)(struct ahash_request *req);
2873 	int (*final)(struct ahash_request *req);
2874 	int (*finup)(struct ahash_request *req);
2875 };
2876 
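/*
 * The hash state keeps two staging buffers (buf_0/buf_1) and ping-pongs
 * between them: one holds the bytes being fed to the current job, while
 * the other collects the leftover bytes saved for the next request.
 * current_buf selects which buffer is the active one.
 */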
2877 static inline void switch_buf(struct caam_hash_state *state)
2878 {
2879 	state->current_buf ^= 1;
2880 }
2881 
2882 static inline u8 *current_buf(struct caam_hash_state *state)
2883 {
2884 	return state->current_buf ? state->buf_1 : state->buf_0;
2885 }
2886 
2887 static inline u8 *alt_buf(struct caam_hash_state *state)
2888 {
2889 	return state->current_buf ? state->buf_0 : state->buf_1;
2890 }
2891 
2892 static inline int *current_buflen(struct caam_hash_state *state)
2893 {
2894 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2895 }
2896 
2897 static inline int *alt_buflen(struct caam_hash_state *state)
2898 {
2899 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2900 }
2901 
2902 /* Map current buffer in state (if length > 0) and put it in link table */
2903 static inline int buf_map_to_qm_sg(struct device *dev,
2904 				   struct dpaa2_sg_entry *qm_sg,
2905 				   struct caam_hash_state *state)
2906 {
2907 	int buflen = *current_buflen(state);
2908 
2909 	if (!buflen)
2910 		return 0;
2911 
2912 	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2913 					DMA_TO_DEVICE);
2914 	if (dma_mapping_error(dev, state->buf_dma)) {
2915 		dev_err(dev, "unable to map buf\n");
2916 		state->buf_dma = 0;
2917 		return -ENOMEM;
2918 	}
2919 
2920 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2921 
2922 	return 0;
2923 }
2924 
2925 /* Map state->caam_ctx, and add it to link table */
2926 static inline int ctx_map_to_qm_sg(struct device *dev,
2927 				   struct caam_hash_state *state, int ctx_len,
2928 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
2929 {
2930 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2931 	if (dma_mapping_error(dev, state->ctx_dma)) {
2932 		dev_err(dev, "unable to map ctx\n");
2933 		state->ctx_dma = 0;
2934 		return -ENOMEM;
2935 	}
2936 
2937 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2938 
2939 	return 0;
2940 }
2941 
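/*
 * Construct the shared descriptors for the four hash operations (update,
 * update_first, finalize, digest), record their lengths in the Flow
 * Contexts and sync the Flow Contexts for device access.
 */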
2942 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2943 {
2944 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2945 	int digestsize = crypto_ahash_digestsize(ahash);
2946 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2947 	struct caam_flc *flc;
2948 	u32 *desc;
2949 
2950 	/* ahash_update shared descriptor */
2951 	flc = &ctx->flc[UPDATE];
2952 	desc = flc->sh_desc;
2953 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2954 			  ctx->ctx_len, true, priv->sec_attr.era);
2955 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2956 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2957 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2958 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
2959 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2960 			     1);
2961 
2962 	/* ahash_update_first shared descriptor */
2963 	flc = &ctx->flc[UPDATE_FIRST];
2964 	desc = flc->sh_desc;
2965 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
2966 			  ctx->ctx_len, false, priv->sec_attr.era);
2967 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2968 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
2969 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2970 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
2971 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2972 			     1);
2973 
2974 	/* ahash_final shared descriptor */
2975 	flc = &ctx->flc[FINALIZE];
2976 	desc = flc->sh_desc;
2977 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
2978 			  ctx->ctx_len, true, priv->sec_attr.era);
2979 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2980 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
2981 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2982 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
2983 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2984 			     1);
2985 
2986 	/* ahash_digest shared descriptor */
2987 	flc = &ctx->flc[DIGEST];
2988 	desc = flc->sh_desc;
2989 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
2990 			  ctx->ctx_len, false, priv->sec_attr.era);
2991 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2992 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
2993 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
2994 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
2995 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2996 			     1);
2997 
2998 	return 0;
2999 }
3000 
3001 struct split_key_sh_result {
3002 	struct completion completion;
3003 	int err;
3004 	struct device *dev;
3005 };
3006 
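/* Completion callback for the key-digesting job issued by hash_digest_key() */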
3007 static void split_key_sh_done(void *cbk_ctx, u32 err)
3008 {
3009 	struct split_key_sh_result *res = cbk_ctx;
3010 
3011 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3012 
3013 	if (err)
3014 		caam_qi2_strstatus(res->dev, err);
3015 
3016 	res->err = err;
3017 	complete(&res->completion);
3018 }
3019 
/* Digest the key down to digestsize bytes when it is too large to use directly */
3021 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
3022 			   u32 *keylen, u8 *key_out, u32 digestsize)
3023 {
3024 	struct caam_request *req_ctx;
3025 	u32 *desc;
3026 	struct split_key_sh_result result;
3027 	dma_addr_t src_dma, dst_dma;
3028 	struct caam_flc *flc;
3029 	dma_addr_t flc_dma;
3030 	int ret = -ENOMEM;
3031 	struct dpaa2_fl_entry *in_fle, *out_fle;
3032 
3033 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3034 	if (!req_ctx)
3035 		return -ENOMEM;
3036 
3037 	in_fle = &req_ctx->fd_flt[1];
3038 	out_fle = &req_ctx->fd_flt[0];
3039 
3040 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3041 	if (!flc)
3042 		goto err_flc;
3043 
3044 	src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
3045 				 DMA_TO_DEVICE);
3046 	if (dma_mapping_error(ctx->dev, src_dma)) {
3047 		dev_err(ctx->dev, "unable to map key input memory\n");
3048 		goto err_src_dma;
3049 	}
3050 	dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
3051 				 DMA_FROM_DEVICE);
3052 	if (dma_mapping_error(ctx->dev, dst_dma)) {
3053 		dev_err(ctx->dev, "unable to map key output memory\n");
3054 		goto err_dst_dma;
3055 	}
3056 
3057 	desc = flc->sh_desc;
3058 
3059 	init_sh_desc(desc, 0);
3060 
3061 	/* descriptor to perform unkeyed hash on key_in */
3062 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3063 			 OP_ALG_AS_INITFINAL);
3064 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3065 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3066 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3067 			 LDST_SRCDST_BYTE_CONTEXT);
3068 
3069 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3070 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3071 				 desc_bytes(desc), DMA_TO_DEVICE);
3072 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3073 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3074 		goto err_flc_dma;
3075 	}
3076 
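	/*
	 * Input frame list entry: the key to be digested, as a single
	 * contiguous buffer; output entry: the digest, also contiguous.
	 */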
3077 	dpaa2_fl_set_final(in_fle, true);
3078 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3079 	dpaa2_fl_set_addr(in_fle, src_dma);
3080 	dpaa2_fl_set_len(in_fle, *keylen);
3081 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3082 	dpaa2_fl_set_addr(out_fle, dst_dma);
3083 	dpaa2_fl_set_len(out_fle, digestsize);
3084 
3085 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3086 			     DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
3087 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3088 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3089 			     1);
3090 
3091 	result.err = 0;
3092 	init_completion(&result.completion);
3093 	result.dev = ctx->dev;
3094 
3095 	req_ctx->flc = flc;
3096 	req_ctx->flc_dma = flc_dma;
3097 	req_ctx->cbk = split_key_sh_done;
3098 	req_ctx->ctx = &result;
3099 
3100 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3101 	if (ret == -EINPROGRESS) {
3102 		/* in progress */
3103 		wait_for_completion(&result.completion);
3104 		ret = result.err;
3105 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key_out,
3107 				     digestsize, 1);
3108 	}
3109 
3110 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3111 			 DMA_TO_DEVICE);
3112 err_flc_dma:
3113 	dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
3114 err_dst_dma:
3115 	dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
3116 err_src_dma:
3117 	kfree(flc);
3118 err_flc:
3119 	kfree(req_ctx);
3120 
3121 	*keylen = digestsize;
3122 
3123 	return ret;
3124 }
3125 
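/*
 * Set the authentication key: keys longer than the block size are first
 * digested down to digestsize bytes, then the padded split key length is
 * derived and the shared descriptors are rebuilt with the new key inlined.
 */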
3126 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3127 			unsigned int keylen)
3128 {
3129 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3130 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3131 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3132 	int ret;
3133 	u8 *hashed_key = NULL;
3134 
3135 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3136 
3137 	if (keylen > blocksize) {
3138 		hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
3139 					   GFP_KERNEL | GFP_DMA);
3140 		if (!hashed_key)
3141 			return -ENOMEM;
3142 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
3143 				      digestsize);
3144 		if (ret)
3145 			goto bad_free_key;
3146 		key = hashed_key;
3147 	}
3148 
3149 	ctx->adata.keylen = keylen;
3150 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3151 					      OP_ALG_ALGSEL_MASK);
3152 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3153 		goto bad_free_key;
3154 
3155 	ctx->adata.key_virt = key;
3156 	ctx->adata.key_inline = true;
3157 
3158 	ret = ahash_set_sh_desc(ahash);
3159 	kfree(hashed_key);
3160 	return ret;
3161 bad_free_key:
3162 	kfree(hashed_key);
3163 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3164 	return -EINVAL;
3165 }
3166 
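/* Unmap the source S/G list, destination buffer, QM S/G table and state buffer */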
3167 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3168 			       struct ahash_request *req, int dst_len)
3169 {
3170 	struct caam_hash_state *state = ahash_request_ctx(req);
3171 
3172 	if (edesc->src_nents)
3173 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3174 	if (edesc->dst_dma)
3175 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
3176 
3177 	if (edesc->qm_sg_bytes)
3178 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3179 				 DMA_TO_DEVICE);
3180 
3181 	if (state->buf_dma) {
3182 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3183 				 DMA_TO_DEVICE);
3184 		state->buf_dma = 0;
3185 	}
3186 }
3187 
3188 static inline void ahash_unmap_ctx(struct device *dev,
3189 				   struct ahash_edesc *edesc,
3190 				   struct ahash_request *req, int dst_len,
3191 				   u32 flag)
3192 {
3193 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3194 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3195 	struct caam_hash_state *state = ahash_request_ctx(req);
3196 
3197 	if (state->ctx_dma) {
3198 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
3199 		state->ctx_dma = 0;
3200 	}
3201 	ahash_unmap(dev, edesc, req, dst_len);
3202 }
3203 
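/*
 * Completion callback for requests that do not involve the running context:
 * only the source data, the state buffer and the digest destination are
 * unmapped before the request is completed.
 */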
3204 static void ahash_done(void *cbk_ctx, u32 status)
3205 {
3206 	struct crypto_async_request *areq = cbk_ctx;
3207 	struct ahash_request *req = ahash_request_cast(areq);
3208 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3209 	struct caam_hash_state *state = ahash_request_ctx(req);
3210 	struct ahash_edesc *edesc = state->caam_req.edesc;
3211 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3212 	int digestsize = crypto_ahash_digestsize(ahash);
3213 	int ecode = 0;
3214 
3215 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3216 
3217 	if (unlikely(status)) {
3218 		caam_qi2_strstatus(ctx->dev, status);
3219 		ecode = -EIO;
3220 	}
3221 
3222 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3223 	qi_cache_free(edesc);
3224 
3225 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3226 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3227 			     ctx->ctx_len, 1);
3228 	if (req->result)
3229 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3230 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3231 				     digestsize, 1);
3232 
3233 	req->base.complete(&req->base, ecode);
3234 }
3235 
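/*
 * Completion callback for update-type requests: the running context was
 * mapped bidirectionally, and the state buffers are switched so that the
 * bytes saved for the next request become the active buffer.
 */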
3236 static void ahash_done_bi(void *cbk_ctx, u32 status)
3237 {
3238 	struct crypto_async_request *areq = cbk_ctx;
3239 	struct ahash_request *req = ahash_request_cast(areq);
3240 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3241 	struct caam_hash_state *state = ahash_request_ctx(req);
3242 	struct ahash_edesc *edesc = state->caam_req.edesc;
3243 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3244 	int ecode = 0;
3245 
3246 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3247 
3248 	if (unlikely(status)) {
3249 		caam_qi2_strstatus(ctx->dev, status);
3250 		ecode = -EIO;
3251 	}
3252 
3253 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3254 	switch_buf(state);
3255 	qi_cache_free(edesc);
3256 
3257 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3258 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3259 			     ctx->ctx_len, 1);
3260 	if (req->result)
3261 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3262 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3263 				     crypto_ahash_digestsize(ahash), 1);
3264 
3265 	req->base.complete(&req->base, ecode);
3266 }
3267 
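/*
 * Completion callback for requests that consume the running context (mapped
 * to device) and produce the final digest, e.g. ahash_final_ctx().
 */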
3268 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3269 {
3270 	struct crypto_async_request *areq = cbk_ctx;
3271 	struct ahash_request *req = ahash_request_cast(areq);
3272 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3273 	struct caam_hash_state *state = ahash_request_ctx(req);
3274 	struct ahash_edesc *edesc = state->caam_req.edesc;
3275 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3276 	int digestsize = crypto_ahash_digestsize(ahash);
3277 	int ecode = 0;
3278 
3279 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3280 
3281 	if (unlikely(status)) {
3282 		caam_qi2_strstatus(ctx->dev, status);
3283 		ecode = -EIO;
3284 	}
3285 
3286 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
3287 	qi_cache_free(edesc);
3288 
3289 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3290 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3291 			     ctx->ctx_len, 1);
3292 	if (req->result)
3293 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3294 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3295 				     digestsize, 1);
3296 
3297 	req->base.complete(&req->base, ecode);
3298 }
3299 
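/*
 * Completion callback for requests that write the running context back
 * (mapped from device); the state buffers are switched for the next request.
 */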
3300 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3301 {
3302 	struct crypto_async_request *areq = cbk_ctx;
3303 	struct ahash_request *req = ahash_request_cast(areq);
3304 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3305 	struct caam_hash_state *state = ahash_request_ctx(req);
3306 	struct ahash_edesc *edesc = state->caam_req.edesc;
3307 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3308 	int ecode = 0;
3309 
3310 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3311 
3312 	if (unlikely(status)) {
3313 		caam_qi2_strstatus(ctx->dev, status);
3314 		ecode = -EIO;
3315 	}
3316 
3317 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
3318 	switch_buf(state);
3319 	qi_cache_free(edesc);
3320 
3321 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3322 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3323 			     ctx->ctx_len, 1);
3324 	if (req->result)
3325 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3326 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3327 				     crypto_ahash_digestsize(ahash), 1);
3328 
3329 	req->base.complete(&req->base, ecode);
3330 }
3331 
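/*
 * Hash as many full blocks as possible: the bytes already buffered in the
 * state plus the new request data are hashed into the running context,
 * while any remainder smaller than a block is copied to the alternate
 * buffer for a later update/final/finup call.
 */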
3332 static int ahash_update_ctx(struct ahash_request *req)
3333 {
3334 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3335 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3336 	struct caam_hash_state *state = ahash_request_ctx(req);
3337 	struct caam_request *req_ctx = &state->caam_req;
3338 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3339 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3340 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3341 		      GFP_KERNEL : GFP_ATOMIC;
3342 	u8 *buf = current_buf(state);
3343 	int *buflen = current_buflen(state);
3344 	u8 *next_buf = alt_buf(state);
3345 	int *next_buflen = alt_buflen(state), last_buflen;
3346 	int in_len = *buflen + req->nbytes, to_hash;
3347 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3348 	struct ahash_edesc *edesc;
3349 	int ret = 0;
3350 
3351 	last_buflen = *next_buflen;
3352 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3353 	to_hash = in_len - *next_buflen;
3354 
3355 	if (to_hash) {
3356 		struct dpaa2_sg_entry *sg_table;
3357 
3358 		src_nents = sg_nents_for_len(req->src,
3359 					     req->nbytes - (*next_buflen));
3360 		if (src_nents < 0) {
3361 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3362 			return src_nents;
3363 		}
3364 
3365 		if (src_nents) {
3366 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3367 						  DMA_TO_DEVICE);
3368 			if (!mapped_nents) {
3369 				dev_err(ctx->dev, "unable to DMA map source\n");
3370 				return -ENOMEM;
3371 			}
3372 		} else {
3373 			mapped_nents = 0;
3374 		}
3375 
3376 		/* allocate space for base edesc and link tables */
3377 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3378 		if (!edesc) {
3379 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3380 				     DMA_TO_DEVICE);
3381 			return -ENOMEM;
3382 		}
3383 
3384 		edesc->src_nents = src_nents;
3385 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3386 		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3387 			      sizeof(*sg_table);
3388 		sg_table = &edesc->sgt[0];
3389 
3390 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3391 				       DMA_BIDIRECTIONAL);
3392 		if (ret)
3393 			goto unmap_ctx;
3394 
3395 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3396 		if (ret)
3397 			goto unmap_ctx;
3398 
3399 		if (mapped_nents) {
3400 			sg_to_qm_sg_last(req->src, mapped_nents,
3401 					 sg_table + qm_sg_src_index, 0);
3402 			if (*next_buflen)
3403 				scatterwalk_map_and_copy(next_buf, req->src,
3404 							 to_hash - *buflen,
3405 							 *next_buflen, 0);
3406 		} else {
3407 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3408 					   true);
3409 		}
3410 
3411 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3412 						  qm_sg_bytes, DMA_TO_DEVICE);
3413 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3414 			dev_err(ctx->dev, "unable to map S/G table\n");
3415 			ret = -ENOMEM;
3416 			goto unmap_ctx;
3417 		}
3418 		edesc->qm_sg_bytes = qm_sg_bytes;
3419 
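		/*
		 * Input frame list entry: S/G table covering the running
		 * context, the buffered bytes and the newly mapped data;
		 * output entry: the updated running context.
		 */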
3420 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3421 		dpaa2_fl_set_final(in_fle, true);
3422 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3423 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3424 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3425 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3426 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3427 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3428 
3429 		req_ctx->flc = &ctx->flc[UPDATE];
3430 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3431 		req_ctx->cbk = ahash_done_bi;
3432 		req_ctx->ctx = &req->base;
3433 		req_ctx->edesc = edesc;
3434 
3435 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3436 		if (ret != -EINPROGRESS &&
3437 		    !(ret == -EBUSY &&
3438 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3439 			goto unmap_ctx;
3440 	} else if (*next_buflen) {
3441 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3442 					 req->nbytes, 0);
3443 		*buflen = *next_buflen;
3444 		*next_buflen = last_buflen;
3445 	}
3446 
3447 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3448 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3449 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3450 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3451 			     1);
3452 
3453 	return ret;
3454 unmap_ctx:
3455 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3456 	qi_cache_free(edesc);
3457 	return ret;
3458 }
3459 
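/*
 * Final step when a running context exists: hash the context together with
 * any buffered bytes and write the resulting digest to req->result.
 */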
3460 static int ahash_final_ctx(struct ahash_request *req)
3461 {
3462 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3463 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3464 	struct caam_hash_state *state = ahash_request_ctx(req);
3465 	struct caam_request *req_ctx = &state->caam_req;
3466 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3467 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3468 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3469 		      GFP_KERNEL : GFP_ATOMIC;
3470 	int buflen = *current_buflen(state);
3471 	int qm_sg_bytes, qm_sg_src_index;
3472 	int digestsize = crypto_ahash_digestsize(ahash);
3473 	struct ahash_edesc *edesc;
3474 	struct dpaa2_sg_entry *sg_table;
3475 	int ret;
3476 
3477 	/* allocate space for base edesc and link tables */
3478 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3479 	if (!edesc)
3480 		return -ENOMEM;
3481 
3482 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3483 	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3484 	sg_table = &edesc->sgt[0];
3485 
3486 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3487 			       DMA_TO_DEVICE);
3488 	if (ret)
3489 		goto unmap_ctx;
3490 
3491 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3492 	if (ret)
3493 		goto unmap_ctx;
3494 
3495 	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3496 
3497 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3498 					  DMA_TO_DEVICE);
3499 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3500 		dev_err(ctx->dev, "unable to map S/G table\n");
3501 		ret = -ENOMEM;
3502 		goto unmap_ctx;
3503 	}
3504 	edesc->qm_sg_bytes = qm_sg_bytes;
3505 
3506 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3507 					DMA_FROM_DEVICE);
3508 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3509 		dev_err(ctx->dev, "unable to map dst\n");
3510 		edesc->dst_dma = 0;
3511 		ret = -ENOMEM;
3512 		goto unmap_ctx;
3513 	}
3514 
3515 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3516 	dpaa2_fl_set_final(in_fle, true);
3517 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3518 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3519 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3520 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3521 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3522 	dpaa2_fl_set_len(out_fle, digestsize);
3523 
3524 	req_ctx->flc = &ctx->flc[FINALIZE];
3525 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3526 	req_ctx->cbk = ahash_done_ctx_src;
3527 	req_ctx->ctx = &req->base;
3528 	req_ctx->edesc = edesc;
3529 
3530 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3531 	if (ret == -EINPROGRESS ||
3532 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3533 		return ret;
3534 
3535 unmap_ctx:
3536 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3537 	qi_cache_free(edesc);
3538 	return ret;
3539 }
3540 
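/*
 * ahash_finup_ctx - hash the remaining request data and finalize
 *
 * Same as ahash_final_ctx(), except the S/G table also chains the
 * DMA-mapped req->src, so the running context, buffered bytes and the
 * remaining request data are consumed in a single FINALIZE job.
 */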
3541 static int ahash_finup_ctx(struct ahash_request *req)
3542 {
3543 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3544 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3545 	struct caam_hash_state *state = ahash_request_ctx(req);
3546 	struct caam_request *req_ctx = &state->caam_req;
3547 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3548 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3549 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3550 		      GFP_KERNEL : GFP_ATOMIC;
3551 	int buflen = *current_buflen(state);
3552 	int qm_sg_bytes, qm_sg_src_index;
3553 	int src_nents, mapped_nents;
3554 	int digestsize = crypto_ahash_digestsize(ahash);
3555 	struct ahash_edesc *edesc;
3556 	struct dpaa2_sg_entry *sg_table;
3557 	int ret;
3558 
3559 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3560 	if (src_nents < 0) {
3561 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3562 		return src_nents;
3563 	}
3564 
3565 	if (src_nents) {
3566 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3567 					  DMA_TO_DEVICE);
3568 		if (!mapped_nents) {
3569 			dev_err(ctx->dev, "unable to DMA map source\n");
3570 			return -ENOMEM;
3571 		}
3572 	} else {
3573 		mapped_nents = 0;
3574 	}
3575 
3576 	/* allocate space for base edesc and link tables */
3577 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3578 	if (!edesc) {
3579 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3580 		return -ENOMEM;
3581 	}
3582 
3583 	edesc->src_nents = src_nents;
3584 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3585 	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3586 	sg_table = &edesc->sgt[0];
3587 
3588 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3589 			       DMA_TO_DEVICE);
3590 	if (ret)
3591 		goto unmap_ctx;
3592 
3593 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3594 	if (ret)
3595 		goto unmap_ctx;
3596 
3597 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3598 
3599 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3600 					  DMA_TO_DEVICE);
3601 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3602 		dev_err(ctx->dev, "unable to map S/G table\n");
3603 		ret = -ENOMEM;
3604 		goto unmap_ctx;
3605 	}
3606 	edesc->qm_sg_bytes = qm_sg_bytes;
3607 
3608 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3609 					DMA_FROM_DEVICE);
3610 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3611 		dev_err(ctx->dev, "unable to map dst\n");
3612 		edesc->dst_dma = 0;
3613 		ret = -ENOMEM;
3614 		goto unmap_ctx;
3615 	}
3616 
3617 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3618 	dpaa2_fl_set_final(in_fle, true);
3619 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3620 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3621 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3622 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3623 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3624 	dpaa2_fl_set_len(out_fle, digestsize);
3625 
3626 	req_ctx->flc = &ctx->flc[FINALIZE];
3627 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3628 	req_ctx->cbk = ahash_done_ctx_src;
3629 	req_ctx->ctx = &req->base;
3630 	req_ctx->edesc = edesc;
3631 
3632 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3633 	if (ret == -EINPROGRESS ||
3634 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3635 		return ret;
3636 
3637 unmap_ctx:
3638 	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3639 	qi_cache_free(edesc);
3640 	return ret;
3641 }
3642 
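/*
 * ahash_digest - one-shot digest of req->src
 *
 * No running context is involved: the source goes in either as a single
 * frame list entry or via a S/G table when it spans several segments, and
 * the DIGEST flow context writes the result straight to req->result.
 */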
3643 static int ahash_digest(struct ahash_request *req)
3644 {
3645 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3646 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3647 	struct caam_hash_state *state = ahash_request_ctx(req);
3648 	struct caam_request *req_ctx = &state->caam_req;
3649 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3650 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3651 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3652 		      GFP_KERNEL : GFP_ATOMIC;
3653 	int digestsize = crypto_ahash_digestsize(ahash);
3654 	int src_nents, mapped_nents;
3655 	struct ahash_edesc *edesc;
3656 	int ret = -ENOMEM;
3657 
3658 	state->buf_dma = 0;
3659 
3660 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3661 	if (src_nents < 0) {
3662 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3663 		return src_nents;
3664 	}
3665 
3666 	if (src_nents) {
3667 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3668 					  DMA_TO_DEVICE);
3669 		if (!mapped_nents) {
3670 			dev_err(ctx->dev, "unable to map source for DMA\n");
3671 			return ret;
3672 		}
3673 	} else {
3674 		mapped_nents = 0;
3675 	}
3676 
3677 	/* allocate space for base edesc and link tables */
3678 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3679 	if (!edesc) {
3680 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3681 		return ret;
3682 	}
3683 
3684 	edesc->src_nents = src_nents;
3685 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3686 
3687 	if (mapped_nents > 1) {
3688 		int qm_sg_bytes;
3689 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3690 
3691 		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3692 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3693 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3694 						  qm_sg_bytes, DMA_TO_DEVICE);
3695 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3696 			dev_err(ctx->dev, "unable to map S/G table\n");
3697 			goto unmap;
3698 		}
3699 		edesc->qm_sg_bytes = qm_sg_bytes;
3700 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3701 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3702 	} else {
3703 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3704 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3705 	}
3706 
3707 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3708 					DMA_FROM_DEVICE);
3709 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3710 		dev_err(ctx->dev, "unable to map dst\n");
3711 		edesc->dst_dma = 0;
3712 		goto unmap;
3713 	}
3714 
3715 	dpaa2_fl_set_final(in_fle, true);
3716 	dpaa2_fl_set_len(in_fle, req->nbytes);
3717 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3718 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3719 	dpaa2_fl_set_len(out_fle, digestsize);
3720 
3721 	req_ctx->flc = &ctx->flc[DIGEST];
3722 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3723 	req_ctx->cbk = ahash_done;
3724 	req_ctx->ctx = &req->base;
3725 	req_ctx->edesc = edesc;
3726 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3727 	if (ret == -EINPROGRESS ||
3728 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3729 		return ret;
3730 
3731 unmap:
3732 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3733 	qi_cache_free(edesc);
3734 	return ret;
3735 }
3736 
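/*
 * ahash_final_no_ctx - finalize when no running context exists
 *
 * Only the bytes accumulated in the current buffer are DMA-mapped and
 * hashed with the DIGEST flow context.
 */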
3737 static int ahash_final_no_ctx(struct ahash_request *req)
3738 {
3739 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3740 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3741 	struct caam_hash_state *state = ahash_request_ctx(req);
3742 	struct caam_request *req_ctx = &state->caam_req;
3743 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3744 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3745 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3746 		      GFP_KERNEL : GFP_ATOMIC;
3747 	u8 *buf = current_buf(state);
3748 	int buflen = *current_buflen(state);
3749 	int digestsize = crypto_ahash_digestsize(ahash);
3750 	struct ahash_edesc *edesc;
3751 	int ret = -ENOMEM;
3752 
3753 	/* allocate space for base edesc and link tables */
3754 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3755 	if (!edesc)
3756 		return ret;
3757 
3758 	state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
3759 	if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3760 		dev_err(ctx->dev, "unable to map src\n");
3761 		goto unmap;
3762 	}
3763 
3764 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3765 					DMA_FROM_DEVICE);
3766 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3767 		dev_err(ctx->dev, "unable to map dst\n");
3768 		edesc->dst_dma = 0;
3769 		goto unmap;
3770 	}
3771 
3772 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3773 	dpaa2_fl_set_final(in_fle, true);
3774 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3775 	dpaa2_fl_set_addr(in_fle, state->buf_dma);
3776 	dpaa2_fl_set_len(in_fle, buflen);
3777 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3778 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3779 	dpaa2_fl_set_len(out_fle, digestsize);
3780 
3781 	req_ctx->flc = &ctx->flc[DIGEST];
3782 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3783 	req_ctx->cbk = ahash_done;
3784 	req_ctx->ctx = &req->base;
3785 	req_ctx->edesc = edesc;
3786 
3787 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3788 	if (ret == -EINPROGRESS ||
3789 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3790 		return ret;
3791 
3792 unmap:
3793 	ahash_unmap(ctx->dev, edesc, req, digestsize);
3794 	qi_cache_free(edesc);
3795 	return ret;
3796 }
3797 
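/*
 * ahash_update_no_ctx - update before a running context exists
 *
 * Whole blocks (plus any previously buffered bytes) are hashed through the
 * UPDATE_FIRST flow context, which also produces the running context in
 * state->caam_ctx; the block-size remainder is copied to the alternate
 * buffer and further requests are routed to the *_ctx handlers.
 */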
3798 static int ahash_update_no_ctx(struct ahash_request *req)
3799 {
3800 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3801 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3802 	struct caam_hash_state *state = ahash_request_ctx(req);
3803 	struct caam_request *req_ctx = &state->caam_req;
3804 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3805 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3806 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3807 		      GFP_KERNEL : GFP_ATOMIC;
3808 	u8 *buf = current_buf(state);
3809 	int *buflen = current_buflen(state);
3810 	u8 *next_buf = alt_buf(state);
3811 	int *next_buflen = alt_buflen(state);
3812 	int in_len = *buflen + req->nbytes, to_hash;
3813 	int qm_sg_bytes, src_nents, mapped_nents;
3814 	struct ahash_edesc *edesc;
3815 	int ret = 0;
3816 
3817 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3818 	to_hash = in_len - *next_buflen;
3819 
3820 	if (to_hash) {
3821 		struct dpaa2_sg_entry *sg_table;
3822 
3823 		src_nents = sg_nents_for_len(req->src,
3824 					     req->nbytes - *next_buflen);
3825 		if (src_nents < 0) {
3826 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3827 			return src_nents;
3828 		}
3829 
3830 		if (src_nents) {
3831 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3832 						  DMA_TO_DEVICE);
3833 			if (!mapped_nents) {
3834 				dev_err(ctx->dev, "unable to DMA map source\n");
3835 				return -ENOMEM;
3836 			}
3837 		} else {
3838 			mapped_nents = 0;
3839 		}
3840 
3841 		/* allocate space for base edesc and link tables */
3842 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3843 		if (!edesc) {
3844 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3845 				     DMA_TO_DEVICE);
3846 			return -ENOMEM;
3847 		}
3848 
3849 		edesc->src_nents = src_nents;
3850 		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3851 		sg_table = &edesc->sgt[0];
3852 
3853 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3854 		if (ret)
3855 			goto unmap_ctx;
3856 
3857 		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3858 
3859 		if (*next_buflen)
3860 			scatterwalk_map_and_copy(next_buf, req->src,
3861 						 to_hash - *buflen,
3862 						 *next_buflen, 0);
3863 
3864 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3865 						  qm_sg_bytes, DMA_TO_DEVICE);
3866 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3867 			dev_err(ctx->dev, "unable to map S/G table\n");
3868 			ret = -ENOMEM;
3869 			goto unmap_ctx;
3870 		}
3871 		edesc->qm_sg_bytes = qm_sg_bytes;
3872 
3873 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3874 						ctx->ctx_len, DMA_FROM_DEVICE);
3875 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3876 			dev_err(ctx->dev, "unable to map ctx\n");
3877 			state->ctx_dma = 0;
3878 			ret = -ENOMEM;
3879 			goto unmap_ctx;
3880 		}
3881 
3882 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3883 		dpaa2_fl_set_final(in_fle, true);
3884 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3885 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3886 		dpaa2_fl_set_len(in_fle, to_hash);
3887 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3888 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3889 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3890 
3891 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3892 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3893 		req_ctx->cbk = ahash_done_ctx_dst;
3894 		req_ctx->ctx = &req->base;
3895 		req_ctx->edesc = edesc;
3896 
3897 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3898 		if (ret != -EINPROGRESS &&
3899 		    !(ret == -EBUSY &&
3900 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3901 			goto unmap_ctx;
3902 
3903 		state->update = ahash_update_ctx;
3904 		state->finup = ahash_finup_ctx;
3905 		state->final = ahash_final_ctx;
3906 	} else if (*next_buflen) {
3907 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3908 					 req->nbytes, 0);
3909 		*buflen = *next_buflen;
3910 		*next_buflen = 0;
3911 	}
3912 
3913 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3914 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3915 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3916 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3917 			     1);
3918 
3919 	return ret;
3920 unmap_ctx:
3921 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
3922 	qi_cache_free(edesc);
3923 	return ret;
3924 }
3925 
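/*
 * ahash_finup_no_ctx - finup without a running context
 *
 * Buffered bytes and req->src are chained in one S/G table and digested in
 * a single DIGEST pass.
 */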
3926 static int ahash_finup_no_ctx(struct ahash_request *req)
3927 {
3928 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3929 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3930 	struct caam_hash_state *state = ahash_request_ctx(req);
3931 	struct caam_request *req_ctx = &state->caam_req;
3932 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3933 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3934 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3935 		      GFP_KERNEL : GFP_ATOMIC;
3936 	int buflen = *current_buflen(state);
3937 	int qm_sg_bytes, src_nents, mapped_nents;
3938 	int digestsize = crypto_ahash_digestsize(ahash);
3939 	struct ahash_edesc *edesc;
3940 	struct dpaa2_sg_entry *sg_table;
3941 	int ret;
3942 
3943 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3944 	if (src_nents < 0) {
3945 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3946 		return src_nents;
3947 	}
3948 
3949 	if (src_nents) {
3950 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3951 					  DMA_TO_DEVICE);
3952 		if (!mapped_nents) {
3953 			dev_err(ctx->dev, "unable to DMA map source\n");
3954 			return -ENOMEM;
3955 		}
3956 	} else {
3957 		mapped_nents = 0;
3958 	}
3959 
3960 	/* allocate space for base edesc and link tables */
3961 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3962 	if (!edesc) {
3963 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3964 		return -ENOMEM;
3965 	}
3966 
3967 	edesc->src_nents = src_nents;
3968 	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3969 	sg_table = &edesc->sgt[0];
3970 
3971 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3972 	if (ret)
3973 		goto unmap;
3974 
3975 	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3976 
3977 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3978 					  DMA_TO_DEVICE);
3979 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3980 		dev_err(ctx->dev, "unable to map S/G table\n");
3981 		ret = -ENOMEM;
3982 		goto unmap;
3983 	}
3984 	edesc->qm_sg_bytes = qm_sg_bytes;
3985 
3986 	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3987 					DMA_FROM_DEVICE);
3988 	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3989 		dev_err(ctx->dev, "unable to map dst\n");
3990 		edesc->dst_dma = 0;
3991 		ret = -ENOMEM;
3992 		goto unmap;
3993 	}
3994 
3995 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3996 	dpaa2_fl_set_final(in_fle, true);
3997 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3998 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3999 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4000 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4001 	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
4002 	dpaa2_fl_set_len(out_fle, digestsize);
4003 
4004 	req_ctx->flc = &ctx->flc[DIGEST];
4005 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4006 	req_ctx->cbk = ahash_done;
4007 	req_ctx->ctx = &req->base;
4008 	req_ctx->edesc = edesc;
4009 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4010 	if (ret != -EINPROGRESS &&
4011 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4012 		goto unmap;
4013 
4014 	return ret;
4015 unmap:
4016 	ahash_unmap(ctx->dev, edesc, req, digestsize);
4017 	qi_cache_free(edesc);
	return ret;
4019 }
4020 
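/*
 * ahash_update_first - very first update of a request stream
 *
 * Full blocks go through the UPDATE_FIRST flow context to seed the running
 * context; the remainder is saved in the alternate buffer. When less than a
 * block is available, the data is only buffered and the no-context handlers
 * are installed instead.
 */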
4021 static int ahash_update_first(struct ahash_request *req)
4022 {
4023 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4024 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4025 	struct caam_hash_state *state = ahash_request_ctx(req);
4026 	struct caam_request *req_ctx = &state->caam_req;
4027 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4028 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4029 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4030 		      GFP_KERNEL : GFP_ATOMIC;
4031 	u8 *next_buf = alt_buf(state);
4032 	int *next_buflen = alt_buflen(state);
4033 	int to_hash;
4034 	int src_nents, mapped_nents;
4035 	struct ahash_edesc *edesc;
4036 	int ret = 0;
4037 
4038 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4039 				      1);
4040 	to_hash = req->nbytes - *next_buflen;
4041 
4042 	if (to_hash) {
4043 		struct dpaa2_sg_entry *sg_table;
4044 
4045 		src_nents = sg_nents_for_len(req->src,
4046 					     req->nbytes - (*next_buflen));
4047 		if (src_nents < 0) {
4048 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4049 			return src_nents;
4050 		}
4051 
4052 		if (src_nents) {
4053 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4054 						  DMA_TO_DEVICE);
4055 			if (!mapped_nents) {
4056 				dev_err(ctx->dev, "unable to map source for DMA\n");
4057 				return -ENOMEM;
4058 			}
4059 		} else {
4060 			mapped_nents = 0;
4061 		}
4062 
4063 		/* allocate space for base edesc and link tables */
4064 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4065 		if (!edesc) {
4066 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4067 				     DMA_TO_DEVICE);
4068 			return -ENOMEM;
4069 		}
4070 
4071 		edesc->src_nents = src_nents;
4072 		sg_table = &edesc->sgt[0];
4073 
4074 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4075 		dpaa2_fl_set_final(in_fle, true);
4076 		dpaa2_fl_set_len(in_fle, to_hash);
4077 
4078 		if (mapped_nents > 1) {
4079 			int qm_sg_bytes;
4080 
4081 			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
4082 			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
4083 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4084 							  qm_sg_bytes,
4085 							  DMA_TO_DEVICE);
4086 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4087 				dev_err(ctx->dev, "unable to map S/G table\n");
4088 				ret = -ENOMEM;
4089 				goto unmap_ctx;
4090 			}
4091 			edesc->qm_sg_bytes = qm_sg_bytes;
4092 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4093 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4094 		} else {
4095 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4096 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4097 		}
4098 
4099 		if (*next_buflen)
4100 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4101 						 *next_buflen, 0);
4102 
4103 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4104 						ctx->ctx_len, DMA_FROM_DEVICE);
4105 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4106 			dev_err(ctx->dev, "unable to map ctx\n");
4107 			state->ctx_dma = 0;
4108 			ret = -ENOMEM;
4109 			goto unmap_ctx;
4110 		}
4111 
4112 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4113 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4114 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4115 
4116 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4117 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4118 		req_ctx->cbk = ahash_done_ctx_dst;
4119 		req_ctx->ctx = &req->base;
4120 		req_ctx->edesc = edesc;
4121 
4122 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4123 		if (ret != -EINPROGRESS &&
4124 		    !(ret == -EBUSY && req->base.flags &
4125 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4126 			goto unmap_ctx;
4127 
4128 		state->update = ahash_update_ctx;
4129 		state->finup = ahash_finup_ctx;
4130 		state->final = ahash_final_ctx;
4131 	} else if (*next_buflen) {
4132 		state->update = ahash_update_no_ctx;
4133 		state->finup = ahash_finup_no_ctx;
4134 		state->final = ahash_final_no_ctx;
4135 		scatterwalk_map_and_copy(next_buf, req->src, 0,
4136 					 req->nbytes, 0);
4137 		switch_buf(state);
4138 	}
4139 
4140 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4141 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4142 			     1);
4143 
4144 	return ret;
4145 unmap_ctx:
4146 	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
4147 	qi_cache_free(edesc);
4148 	return ret;
4149 }
4150 
4151 static int ahash_finup_first(struct ahash_request *req)
4152 {
4153 	return ahash_digest(req);
4154 }
4155 
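/*
 * The generic entry points below dispatch through the per-request state,
 * which starts out pointing at the "first update" / "no context" handlers
 * and is switched once a running context has been produced.
 */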
4156 static int ahash_init(struct ahash_request *req)
4157 {
4158 	struct caam_hash_state *state = ahash_request_ctx(req);
4159 
4160 	state->update = ahash_update_first;
4161 	state->finup = ahash_finup_first;
4162 	state->final = ahash_final_no_ctx;
4163 
4164 	state->ctx_dma = 0;
4165 	state->current_buf = 0;
4166 	state->buf_dma = 0;
4167 	state->buflen_0 = 0;
4168 	state->buflen_1 = 0;
4169 
4170 	return 0;
4171 }
4172 
4173 static int ahash_update(struct ahash_request *req)
4174 {
4175 	struct caam_hash_state *state = ahash_request_ctx(req);
4176 
4177 	return state->update(req);
4178 }
4179 
4180 static int ahash_finup(struct ahash_request *req)
4181 {
4182 	struct caam_hash_state *state = ahash_request_ctx(req);
4183 
4184 	return state->finup(req);
4185 }
4186 
4187 static int ahash_final(struct ahash_request *req)
4188 {
4189 	struct caam_hash_state *state = ahash_request_ctx(req);
4190 
4191 	return state->final(req);
4192 }
4193 
4194 static int ahash_export(struct ahash_request *req, void *out)
4195 {
4196 	struct caam_hash_state *state = ahash_request_ctx(req);
4197 	struct caam_export_state *export = out;
4198 	int len;
4199 	u8 *buf;
4200 
4201 	if (state->current_buf) {
4202 		buf = state->buf_1;
4203 		len = state->buflen_1;
4204 	} else {
4205 		buf = state->buf_0;
4206 		len = state->buflen_0;
4207 	}
4208 
4209 	memcpy(export->buf, buf, len);
4210 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4211 	export->buflen = len;
4212 	export->update = state->update;
4213 	export->final = state->final;
4214 	export->finup = state->finup;
4215 
4216 	return 0;
4217 }
4218 
4219 static int ahash_import(struct ahash_request *req, const void *in)
4220 {
4221 	struct caam_hash_state *state = ahash_request_ctx(req);
4222 	const struct caam_export_state *export = in;
4223 
4224 	memset(state, 0, sizeof(*state));
4225 	memcpy(state->buf_0, export->buf, export->buflen);
4226 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4227 	state->buflen_0 = export->buflen;
4228 	state->update = export->update;
4229 	state->final = export->final;
4230 	state->finup = export->finup;
4231 
4232 	return 0;
4233 }
4234 
4235 struct caam_hash_template {
4236 	char name[CRYPTO_MAX_ALG_NAME];
4237 	char driver_name[CRYPTO_MAX_ALG_NAME];
4238 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4239 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4240 	unsigned int blocksize;
4241 	struct ahash_alg template_ahash;
4242 	u32 alg_type;
4243 };
4244 
4245 /* ahash descriptors */
4246 static struct caam_hash_template driver_hash[] = {
4247 	{
4248 		.name = "sha1",
4249 		.driver_name = "sha1-caam-qi2",
4250 		.hmac_name = "hmac(sha1)",
4251 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4252 		.blocksize = SHA1_BLOCK_SIZE,
4253 		.template_ahash = {
4254 			.init = ahash_init,
4255 			.update = ahash_update,
4256 			.final = ahash_final,
4257 			.finup = ahash_finup,
4258 			.digest = ahash_digest,
4259 			.export = ahash_export,
4260 			.import = ahash_import,
4261 			.setkey = ahash_setkey,
4262 			.halg = {
4263 				.digestsize = SHA1_DIGEST_SIZE,
4264 				.statesize = sizeof(struct caam_export_state),
4265 			},
4266 		},
4267 		.alg_type = OP_ALG_ALGSEL_SHA1,
4268 	}, {
4269 		.name = "sha224",
4270 		.driver_name = "sha224-caam-qi2",
4271 		.hmac_name = "hmac(sha224)",
4272 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4273 		.blocksize = SHA224_BLOCK_SIZE,
4274 		.template_ahash = {
4275 			.init = ahash_init,
4276 			.update = ahash_update,
4277 			.final = ahash_final,
4278 			.finup = ahash_finup,
4279 			.digest = ahash_digest,
4280 			.export = ahash_export,
4281 			.import = ahash_import,
4282 			.setkey = ahash_setkey,
4283 			.halg = {
4284 				.digestsize = SHA224_DIGEST_SIZE,
4285 				.statesize = sizeof(struct caam_export_state),
4286 			},
4287 		},
4288 		.alg_type = OP_ALG_ALGSEL_SHA224,
4289 	}, {
4290 		.name = "sha256",
4291 		.driver_name = "sha256-caam-qi2",
4292 		.hmac_name = "hmac(sha256)",
4293 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4294 		.blocksize = SHA256_BLOCK_SIZE,
4295 		.template_ahash = {
4296 			.init = ahash_init,
4297 			.update = ahash_update,
4298 			.final = ahash_final,
4299 			.finup = ahash_finup,
4300 			.digest = ahash_digest,
4301 			.export = ahash_export,
4302 			.import = ahash_import,
4303 			.setkey = ahash_setkey,
4304 			.halg = {
4305 				.digestsize = SHA256_DIGEST_SIZE,
4306 				.statesize = sizeof(struct caam_export_state),
4307 			},
4308 		},
4309 		.alg_type = OP_ALG_ALGSEL_SHA256,
4310 	}, {
4311 		.name = "sha384",
4312 		.driver_name = "sha384-caam-qi2",
4313 		.hmac_name = "hmac(sha384)",
4314 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4315 		.blocksize = SHA384_BLOCK_SIZE,
4316 		.template_ahash = {
4317 			.init = ahash_init,
4318 			.update = ahash_update,
4319 			.final = ahash_final,
4320 			.finup = ahash_finup,
4321 			.digest = ahash_digest,
4322 			.export = ahash_export,
4323 			.import = ahash_import,
4324 			.setkey = ahash_setkey,
4325 			.halg = {
4326 				.digestsize = SHA384_DIGEST_SIZE,
4327 				.statesize = sizeof(struct caam_export_state),
4328 			},
4329 		},
4330 		.alg_type = OP_ALG_ALGSEL_SHA384,
4331 	}, {
4332 		.name = "sha512",
4333 		.driver_name = "sha512-caam-qi2",
4334 		.hmac_name = "hmac(sha512)",
4335 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4336 		.blocksize = SHA512_BLOCK_SIZE,
4337 		.template_ahash = {
4338 			.init = ahash_init,
4339 			.update = ahash_update,
4340 			.final = ahash_final,
4341 			.finup = ahash_finup,
4342 			.digest = ahash_digest,
4343 			.export = ahash_export,
4344 			.import = ahash_import,
4345 			.setkey = ahash_setkey,
4346 			.halg = {
4347 				.digestsize = SHA512_DIGEST_SIZE,
4348 				.statesize = sizeof(struct caam_export_state),
4349 			},
4350 		},
4351 		.alg_type = OP_ALG_ALGSEL_SHA512,
4352 	}, {
4353 		.name = "md5",
4354 		.driver_name = "md5-caam-qi2",
4355 		.hmac_name = "hmac(md5)",
4356 		.hmac_driver_name = "hmac-md5-caam-qi2",
4357 		.blocksize = MD5_BLOCK_WORDS * 4,
4358 		.template_ahash = {
4359 			.init = ahash_init,
4360 			.update = ahash_update,
4361 			.final = ahash_final,
4362 			.finup = ahash_finup,
4363 			.digest = ahash_digest,
4364 			.export = ahash_export,
4365 			.import = ahash_import,
4366 			.setkey = ahash_setkey,
4367 			.halg = {
4368 				.digestsize = MD5_DIGEST_SIZE,
4369 				.statesize = sizeof(struct caam_export_state),
4370 			},
4371 		},
4372 		.alg_type = OP_ALG_ALGSEL_MD5,
4373 	}
4374 };
4375 
4376 struct caam_hash_alg {
4377 	struct list_head entry;
4378 	struct device *dev;
4379 	int alg_type;
4380 	struct ahash_alg ahash_alg;
4381 };
4382 
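/*
 * caam_hash_cra_init - per-tfm setup
 *
 * DMA-maps the flow context array once for all operations, selects the
 * MDHA running digest length for the chosen algorithm and builds the
 * shared descriptors via ahash_set_sh_desc().
 */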
4383 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4384 {
4385 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4386 	struct crypto_alg *base = tfm->__crt_alg;
4387 	struct hash_alg_common *halg =
4388 		 container_of(base, struct hash_alg_common, base);
4389 	struct ahash_alg *alg =
4390 		 container_of(halg, struct ahash_alg, halg);
4391 	struct caam_hash_alg *caam_hash =
4392 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4393 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4394 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4395 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4396 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4397 					 HASH_MSG_LEN + 32,
4398 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4399 					 HASH_MSG_LEN + 64,
4400 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4401 	dma_addr_t dma_addr;
4402 	int i;
4403 
4404 	ctx->dev = caam_hash->dev;
4405 
4406 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4407 					DMA_BIDIRECTIONAL,
4408 					DMA_ATTR_SKIP_CPU_SYNC);
4409 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4410 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4411 		return -ENOMEM;
4412 	}
4413 
4414 	for (i = 0; i < HASH_NUM_OP; i++)
4415 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4416 
4417 	/* copy descriptor header template value */
4418 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4419 
4420 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4421 				   OP_ALG_ALGSEL_SUBMASK) >>
4422 				  OP_ALG_ALGSEL_SHIFT];
4423 
4424 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4425 				 sizeof(struct caam_hash_state));
4426 
4427 	return ahash_set_sh_desc(ahash);
4428 }
4429 
4430 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4431 {
4432 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4433 
4434 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4435 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4436 }
4437 
4438 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4439 	struct caam_hash_template *template, bool keyed)
4440 {
4441 	struct caam_hash_alg *t_alg;
4442 	struct ahash_alg *halg;
4443 	struct crypto_alg *alg;
4444 
4445 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4446 	if (!t_alg)
4447 		return ERR_PTR(-ENOMEM);
4448 
4449 	t_alg->ahash_alg = template->template_ahash;
4450 	halg = &t_alg->ahash_alg;
4451 	alg = &halg->halg.base;
4452 
4453 	if (keyed) {
4454 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4455 			 template->hmac_name);
4456 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4457 			 template->hmac_driver_name);
4458 	} else {
4459 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4460 			 template->name);
4461 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4462 			 template->driver_name);
4463 		t_alg->ahash_alg.setkey = NULL;
4464 	}
4465 	alg->cra_module = THIS_MODULE;
4466 	alg->cra_init = caam_hash_cra_init;
4467 	alg->cra_exit = caam_hash_cra_exit;
4468 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4469 	alg->cra_priority = CAAM_CRA_PRIORITY;
4470 	alg->cra_blocksize = template->blocksize;
4471 	alg->cra_alignmask = 0;
4472 	alg->cra_flags = CRYPTO_ALG_ASYNC;
4473 
4474 	t_alg->alg_type = template->alg_type;
4475 	t_alg->dev = dev;
4476 
4477 	return t_alg;
4478 }
4479 
4480 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4481 {
4482 	struct dpaa2_caam_priv_per_cpu *ppriv;
4483 
4484 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4485 	napi_schedule_irqoff(&ppriv->napi);
4486 }
4487 
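/*
 * dpaa2_dpseci_dpio_setup - per-CPU DPIO bring-up
 *
 * Registers one FQDAN notification context and one dequeue store per online
 * CPU (up to num_pairs). A missing affine DPIO is reported as -EPROBE_DEFER
 * so probing can be retried once the DPIO devices show up.
 */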
4488 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4489 {
4490 	struct device *dev = priv->dev;
4491 	struct dpaa2_io_notification_ctx *nctx;
4492 	struct dpaa2_caam_priv_per_cpu *ppriv;
4493 	int err, i = 0, cpu;
4494 
4495 	for_each_online_cpu(cpu) {
4496 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4497 		ppriv->priv = priv;
4498 		nctx = &ppriv->nctx;
4499 		nctx->is_cdan = 0;
4500 		nctx->id = ppriv->rsp_fqid;
4501 		nctx->desired_cpu = cpu;
4502 		nctx->cb = dpaa2_caam_fqdan_cb;
4503 
4504 		/* Register notification callbacks */
4505 		ppriv->dpio = dpaa2_io_service_select(cpu);
4506 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4507 		if (unlikely(err)) {
4508 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4509 			nctx->cb = NULL;
4510 			/*
4511 			 * If no affine DPIO for this core, there's probably
4512 			 * none available for next cores either. Signal we want
4513 			 * to retry later, in case the DPIO devices weren't
4514 			 * probed yet.
4515 			 */
4516 			err = -EPROBE_DEFER;
4517 			goto err;
4518 		}
4519 
4520 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4521 						     dev);
4522 		if (unlikely(!ppriv->store)) {
4523 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4524 			err = -ENOMEM;
4525 			goto err;
4526 		}
4527 
4528 		if (++i == priv->num_pairs)
4529 			break;
4530 	}
4531 
4532 	return 0;
4533 
4534 err:
4535 	for_each_online_cpu(cpu) {
4536 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4537 		if (!ppriv->nctx.cb)
4538 			break;
4539 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4540 	}
4541 
4542 	for_each_online_cpu(cpu) {
4543 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4544 		if (!ppriv->store)
4545 			break;
4546 		dpaa2_io_store_destroy(ppriv->store);
4547 	}
4548 
4549 	return err;
4550 }
4551 
4552 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4553 {
4554 	struct dpaa2_caam_priv_per_cpu *ppriv;
4555 	int i = 0, cpu;
4556 
4557 	for_each_online_cpu(cpu) {
4558 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4559 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4560 					    priv->dev);
4561 		dpaa2_io_store_destroy(ppriv->store);
4562 
4563 		if (++i == priv->num_pairs)
4564 			return;
4565 	}
4566 }
4567 
4568 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4569 {
4570 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4571 	struct device *dev = priv->dev;
4572 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4573 	struct dpaa2_caam_priv_per_cpu *ppriv;
4574 	int err = 0, i = 0, cpu;
4575 
4576 	/* Configure Rx queues */
4577 	for_each_online_cpu(cpu) {
4578 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4579 
4580 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4581 				       DPSECI_QUEUE_OPT_USER_CTX;
4582 		rx_queue_cfg.order_preservation_en = 0;
4583 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4584 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4585 		/*
4586 		 * Rx priority (WQ) doesn't really matter, since we use
4587 		 * pull mode, i.e. volatile dequeues from specific FQs
4588 		 */
4589 		rx_queue_cfg.dest_cfg.priority = 0;
4590 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4591 
4592 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4593 					  &rx_queue_cfg);
4594 		if (err) {
4595 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4596 				err);
4597 			return err;
4598 		}
4599 
4600 		if (++i == priv->num_pairs)
4601 			break;
4602 	}
4603 
4604 	return err;
4605 }
4606 
4607 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4608 {
4609 	struct device *dev = priv->dev;
4610 
4611 	if (!priv->cscn_mem)
4612 		return;
4613 
4614 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4615 	kfree(priv->cscn_mem);
4616 }
4617 
4618 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4619 {
4620 	struct device *dev = priv->dev;
4621 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4622 
4623 	dpaa2_dpseci_congestion_free(priv);
4624 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4625 }
4626 
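/*
 * dpaa2_caam_process_fd - handle one response frame descriptor
 *
 * Translates FD[ADDR] back to the originating caam_request, unmaps its
 * frame list and invokes the completion callback with FD[FRC] as status.
 */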
4627 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4628 				  const struct dpaa2_fd *fd)
4629 {
4630 	struct caam_request *req;
4631 	u32 fd_err;
4632 
4633 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4634 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4635 		return;
4636 	}
4637 
4638 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4639 	if (unlikely(fd_err))
4640 		dev_err(priv->dev, "FD error: %08x\n", fd_err);
4641 
4642 	/*
4643 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4644 	 * in FD[ERR] or FD[FRC].
4645 	 */
4646 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4647 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4648 			 DMA_BIDIRECTIONAL);
4649 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4650 }
4651 
4652 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4653 {
4654 	int err;
4655 
4656 	/* Retry while portal is busy */
4657 	do {
4658 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4659 					       ppriv->store);
4660 	} while (err == -EBUSY);
4661 
4662 	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4664 
4665 	return err;
4666 }
4667 
4668 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4669 {
4670 	struct dpaa2_dq *dq;
4671 	int cleaned = 0, is_last;
4672 
4673 	do {
4674 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4675 		if (unlikely(!dq)) {
4676 			if (unlikely(!is_last)) {
4677 				dev_dbg(ppriv->priv->dev,
4678 					"FQ %d returned no valid frames\n",
4679 					ppriv->rsp_fqid);
4680 				/*
4681 				 * MUST retry until we get some sort of
4682 				 * valid response token (be it "empty dequeue"
4683 				 * or a valid frame).
4684 				 */
4685 				continue;
4686 			}
4687 			break;
4688 		}
4689 
4690 		/* Process FD */
4691 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4692 		cleaned++;
4693 	} while (!is_last);
4694 
4695 	return cleaned;
4696 }
4697 
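/*
 * NAPI poll: volatile-dequeue from the response FQ into the per-CPU store
 * and consume frames until the store runs dry or the budget would be
 * exceeded; notifications are re-armed only when the budget was not
 * exhausted.
 */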
4698 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4699 {
4700 	struct dpaa2_caam_priv_per_cpu *ppriv;
4701 	struct dpaa2_caam_priv *priv;
4702 	int err, cleaned = 0, store_cleaned;
4703 
4704 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4705 	priv = ppriv->priv;
4706 
4707 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4708 		return 0;
4709 
4710 	do {
4711 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4712 		cleaned += store_cleaned;
4713 
4714 		if (store_cleaned == 0 ||
4715 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4716 			break;
4717 
4718 		/* Try to dequeue some more */
4719 		err = dpaa2_caam_pull_fq(ppriv);
4720 		if (unlikely(err))
4721 			break;
4722 	} while (1);
4723 
4724 	if (cleaned < budget) {
4725 		napi_complete_done(napi, cleaned);
4726 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4727 		if (unlikely(err))
4728 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4729 				err);
4730 	}
4731 
4732 	return cleaned;
4733 }
4734 
4735 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4736 					 u16 token)
4737 {
4738 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4739 	struct device *dev = priv->dev;
4740 	int err;
4741 
4742 	/*
4743 	 * Congestion group feature supported starting with DPSECI API v5.1
4744 	 * and only when object has been created with this capability.
	 * and only when the object has been created with this capability.
4746 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4747 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4748 		return 0;
4749 
4750 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4751 				 GFP_KERNEL | GFP_DMA);
4752 	if (!priv->cscn_mem)
4753 		return -ENOMEM;
4754 
4755 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4756 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4757 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4758 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4759 		dev_err(dev, "Error mapping CSCN memory area\n");
4760 		err = -ENOMEM;
4761 		goto err_dma_map;
4762 	}
4763 
4764 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4765 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4766 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4767 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4768 	cong_notif_cfg.message_iova = priv->cscn_dma;
4769 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4770 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4771 					DPSECI_CGN_MODE_COHERENT_WRITE;
4772 
4773 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4774 						 &cong_notif_cfg);
4775 	if (err) {
4776 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4777 		goto err_set_cong;
4778 	}
4779 
4780 	return 0;
4781 
4782 err_set_cong:
4783 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4784 err_dma_map:
4785 	kfree(priv->cscn_mem);
4786 
4787 	return err;
4788 }
4789 
4790 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4791 {
4792 	struct device *dev = &ls_dev->dev;
4793 	struct dpaa2_caam_priv *priv;
4794 	struct dpaa2_caam_priv_per_cpu *ppriv;
4795 	int err, cpu;
4796 	u8 i;
4797 
4798 	priv = dev_get_drvdata(dev);
4799 
4800 	priv->dev = dev;
4801 	priv->dpsec_id = ls_dev->obj_desc.id;
4802 
	/* Get a handle for the DPSECI this interface is associated with */
4804 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4805 	if (err) {
4806 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4807 		goto err_open;
4808 	}
4809 
4810 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4811 				     &priv->minor_ver);
4812 	if (err) {
4813 		dev_err(dev, "dpseci_get_api_version() failed\n");
4814 		goto err_get_vers;
4815 	}
4816 
4817 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4818 
4819 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4820 				    &priv->dpseci_attr);
4821 	if (err) {
4822 		dev_err(dev, "dpseci_get_attributes() failed\n");
4823 		goto err_get_vers;
4824 	}
4825 
4826 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4827 				  &priv->sec_attr);
4828 	if (err) {
4829 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4830 		goto err_get_vers;
4831 	}
4832 
4833 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4834 	if (err) {
4835 		dev_err(dev, "setup_congestion() failed\n");
4836 		goto err_get_vers;
4837 	}
4838 
4839 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4840 			      priv->dpseci_attr.num_tx_queues);
4841 	if (priv->num_pairs > num_online_cpus()) {
4842 		dev_warn(dev, "%d queues won't be used\n",
4843 			 priv->num_pairs - num_online_cpus());
4844 		priv->num_pairs = num_online_cpus();
4845 	}
4846 
4847 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4848 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4849 					  &priv->rx_queue_attr[i]);
4850 		if (err) {
4851 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4852 			goto err_get_rx_queue;
4853 		}
4854 	}
4855 
4856 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4857 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4858 					  &priv->tx_queue_attr[i]);
4859 		if (err) {
4860 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4861 			goto err_get_rx_queue;
4862 		}
4863 	}
4864 
4865 	i = 0;
4866 	for_each_online_cpu(cpu) {
4867 		u8 j;
4868 
4869 		j = i % priv->num_pairs;
4870 
4871 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4872 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4873 
4874 		/*
4875 		 * Allow all cores to enqueue, while only some of them
4876 		 * will take part in dequeuing.
4877 		 */
4878 		if (++i > priv->num_pairs)
4879 			continue;
4880 
4881 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4882 		ppriv->prio = j;
4883 
4884 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4885 			priv->rx_queue_attr[j].fqid,
4886 			priv->tx_queue_attr[j].fqid);
4887 
4888 		ppriv->net_dev.dev = *dev;
4889 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4890 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4891 			       DPAA2_CAAM_NAPI_WEIGHT);
4892 	}
4893 
4894 	return 0;
4895 
4896 err_get_rx_queue:
4897 	dpaa2_dpseci_congestion_free(priv);
4898 err_get_vers:
4899 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4900 err_open:
4901 	return err;
4902 }
4903 
4904 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4905 {
4906 	struct device *dev = priv->dev;
4907 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4908 	struct dpaa2_caam_priv_per_cpu *ppriv;
4909 	int i;
4910 
4911 	for (i = 0; i < priv->num_pairs; i++) {
4912 		ppriv = per_cpu_ptr(priv->ppriv, i);
4913 		napi_enable(&ppriv->napi);
4914 	}
4915 
4916 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4917 }
4918 
4919 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4920 {
4921 	struct device *dev = priv->dev;
4922 	struct dpaa2_caam_priv_per_cpu *ppriv;
4923 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4924 	int i, err = 0, enabled;
4925 
4926 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4927 	if (err) {
4928 		dev_err(dev, "dpseci_disable() failed\n");
4929 		return err;
4930 	}
4931 
4932 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4933 	if (err) {
4934 		dev_err(dev, "dpseci_is_enabled() failed\n");
4935 		return err;
4936 	}
4937 
4938 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4939 
4940 	for (i = 0; i < priv->num_pairs; i++) {
4941 		ppriv = per_cpu_ptr(priv->ppriv, i);
4942 		napi_disable(&ppriv->napi);
4943 		netif_napi_del(&ppriv->napi);
4944 	}
4945 
4946 	return 0;
4947 }
4948 
4949 static struct list_head hash_list;
4950 
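/*
 * Probe order: MC portal, per-CPU private data, DPSECI setup, DPIO setup,
 * Rx queue binding, enable, then registration of the skcipher / aead /
 * ahash algorithms supported by the accelerators reported in sec_attr.
 */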
4951 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4952 {
4953 	struct device *dev;
4954 	struct dpaa2_caam_priv *priv;
4955 	int i, err = 0;
4956 	bool registered = false;
4957 
4958 	/*
4959 	 * There is no way to get CAAM endianness - there is no direct register
4960 	 * space access and MC f/w does not provide this attribute.
4961 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4962 	 * property.
4963 	 */
4964 	caam_little_end = true;
4965 
4966 	caam_imx = false;
4967 
4968 	dev = &dpseci_dev->dev;
4969 
4970 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4971 	if (!priv)
4972 		return -ENOMEM;
4973 
4974 	dev_set_drvdata(dev, priv);
4975 
4976 	priv->domain = iommu_get_domain_for_dev(dev);
4977 
4978 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4979 				     0, SLAB_CACHE_DMA, NULL);
4980 	if (!qi_cache) {
4981 		dev_err(dev, "Can't allocate SEC cache\n");
4982 		return -ENOMEM;
4983 	}
4984 
4985 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
4986 	if (err) {
4987 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
4988 		goto err_dma_mask;
4989 	}
4990 
4991 	/* Obtain a MC portal */
4992 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
4993 	if (err) {
4994 		if (err == -ENXIO)
4995 			err = -EPROBE_DEFER;
4996 		else
4997 			dev_err(dev, "MC portal allocation failed\n");
4998 
4999 		goto err_dma_mask;
5000 	}
5001 
5002 	priv->ppriv = alloc_percpu(*priv->ppriv);
5003 	if (!priv->ppriv) {
5004 		dev_err(dev, "alloc_percpu() failed\n");
5005 		err = -ENOMEM;
5006 		goto err_alloc_ppriv;
5007 	}
5008 
5009 	/* DPSECI initialization */
5010 	err = dpaa2_dpseci_setup(dpseci_dev);
5011 	if (err) {
5012 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5013 		goto err_dpseci_setup;
5014 	}
5015 
5016 	/* DPIO */
5017 	err = dpaa2_dpseci_dpio_setup(priv);
5018 	if (err) {
5019 		if (err != -EPROBE_DEFER)
5020 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5021 		goto err_dpio_setup;
5022 	}
5023 
5024 	/* DPSECI binding to DPIO */
5025 	err = dpaa2_dpseci_bind(priv);
5026 	if (err) {
5027 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5028 		goto err_bind;
5029 	}
5030 
5031 	/* DPSECI enable */
5032 	err = dpaa2_dpseci_enable(priv);
5033 	if (err) {
5034 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5035 		goto err_bind;
5036 	}
5037 
5038 	/* register crypto algorithms the device supports */
5039 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5040 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5041 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5042 
5043 		/* Skip DES algorithms if not supported by device */
5044 		if (!priv->sec_attr.des_acc_num &&
5045 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5046 		     alg_sel == OP_ALG_ALGSEL_DES))
5047 			continue;
5048 
5049 		/* Skip AES algorithms if not supported by device */
5050 		if (!priv->sec_attr.aes_acc_num &&
5051 		    alg_sel == OP_ALG_ALGSEL_AES)
5052 			continue;
5053 
5054 		/* Skip CHACHA20 algorithms if not supported by device */
5055 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5056 		    !priv->sec_attr.ccha_acc_num)
5057 			continue;
5058 
5059 		t_alg->caam.dev = dev;
5060 		caam_skcipher_alg_init(t_alg);
5061 
5062 		err = crypto_register_skcipher(&t_alg->skcipher);
5063 		if (err) {
5064 			dev_warn(dev, "%s alg registration failed: %d\n",
5065 				 t_alg->skcipher.base.cra_driver_name, err);
5066 			continue;
5067 		}
5068 
5069 		t_alg->registered = true;
5070 		registered = true;
5071 	}
5072 
5073 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5074 		struct caam_aead_alg *t_alg = driver_aeads + i;
5075 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5076 				 OP_ALG_ALGSEL_MASK;
5077 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5078 				 OP_ALG_ALGSEL_MASK;
5079 
5080 		/* Skip DES algorithms if not supported by device */
5081 		if (!priv->sec_attr.des_acc_num &&
5082 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5083 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5084 			continue;
5085 
5086 		/* Skip AES algorithms if not supported by device */
5087 		if (!priv->sec_attr.aes_acc_num &&
5088 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5089 			continue;
5090 
5091 		/* Skip CHACHA20 algorithms if not supported by device */
5092 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5093 		    !priv->sec_attr.ccha_acc_num)
5094 			continue;
5095 
5096 		/* Skip POLY1305 algorithms if not supported by device */
5097 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5098 		    !priv->sec_attr.ptha_acc_num)
5099 			continue;
5100 
5101 		/*
5102 		 * Skip algorithms requiring message digests
5103 		 * if MD not supported by device.
5104 		 */
5105 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5106 		    !priv->sec_attr.md_acc_num)
5107 			continue;
5108 
5109 		t_alg->caam.dev = dev;
5110 		caam_aead_alg_init(t_alg);
5111 
5112 		err = crypto_register_aead(&t_alg->aead);
5113 		if (err) {
5114 			dev_warn(dev, "%s alg registration failed: %d\n",
5115 				 t_alg->aead.base.cra_driver_name, err);
5116 			continue;
5117 		}
5118 
5119 		t_alg->registered = true;
5120 		registered = true;
5121 	}
5122 	if (registered)
5123 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5124 
5125 	/* register hash algorithms the device supports */
5126 	INIT_LIST_HEAD(&hash_list);
5127 
5128 	/*
5129 	 * Skip registration of any hashing algorithms if MD block
5130 	 * is not present.
5131 	 */
5132 	if (!priv->sec_attr.md_acc_num)
5133 		return 0;
5134 
5135 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5136 		struct caam_hash_alg *t_alg;
5137 		struct caam_hash_template *alg = driver_hash + i;
5138 
5139 		/* register hmac version */
5140 		t_alg = caam_hash_alloc(dev, alg, true);
5141 		if (IS_ERR(t_alg)) {
5142 			err = PTR_ERR(t_alg);
5143 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5144 				 alg->driver_name, err);
5145 			continue;
5146 		}
5147 
5148 		err = crypto_register_ahash(&t_alg->ahash_alg);
5149 		if (err) {
5150 			dev_warn(dev, "%s alg registration failed: %d\n",
5151 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5152 				 err);
5153 			kfree(t_alg);
5154 		} else {
5155 			list_add_tail(&t_alg->entry, &hash_list);
5156 		}
5157 
5158 		/* register unkeyed version */
5159 		t_alg = caam_hash_alloc(dev, alg, false);
5160 		if (IS_ERR(t_alg)) {
5161 			err = PTR_ERR(t_alg);
5162 			dev_warn(dev, "%s alg allocation failed: %d\n",
5163 				 alg->driver_name, err);
5164 			continue;
5165 		}
5166 
5167 		err = crypto_register_ahash(&t_alg->ahash_alg);
5168 		if (err) {
5169 			dev_warn(dev, "%s alg registration failed: %d\n",
5170 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5171 				 err);
5172 			kfree(t_alg);
5173 		} else {
5174 			list_add_tail(&t_alg->entry, &hash_list);
5175 		}
5176 	}
5177 	if (!list_empty(&hash_list))
5178 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5179 
5180 	return err;
5181 
5182 err_bind:
5183 	dpaa2_dpseci_dpio_free(priv);
5184 err_dpio_setup:
5185 	dpaa2_dpseci_free(priv);
5186 err_dpseci_setup:
5187 	free_percpu(priv->ppriv);
5188 err_alloc_ppriv:
5189 	fsl_mc_portal_free(priv->mc_io);
5190 err_dma_mask:
5191 	kmem_cache_destroy(qi_cache);
5192 
5193 	return err;
5194 }
5195 
5196 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5197 {
5198 	struct device *dev;
5199 	struct dpaa2_caam_priv *priv;
5200 	int i;
5201 
5202 	dev = &ls_dev->dev;
5203 	priv = dev_get_drvdata(dev);
5204 
5205 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5206 		struct caam_aead_alg *t_alg = driver_aeads + i;
5207 
5208 		if (t_alg->registered)
5209 			crypto_unregister_aead(&t_alg->aead);
5210 	}
5211 
5212 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5213 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5214 
5215 		if (t_alg->registered)
5216 			crypto_unregister_skcipher(&t_alg->skcipher);
5217 	}
5218 
5219 	if (hash_list.next) {
5220 		struct caam_hash_alg *t_hash_alg, *p;
5221 
5222 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5223 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5224 			list_del(&t_hash_alg->entry);
5225 			kfree(t_hash_alg);
5226 		}
5227 	}
5228 
5229 	dpaa2_dpseci_disable(priv);
5230 	dpaa2_dpseci_dpio_free(priv);
5231 	dpaa2_dpseci_free(priv);
5232 	free_percpu(priv->ppriv);
5233 	fsl_mc_portal_free(priv->mc_io);
5234 	kmem_cache_destroy(qi_cache);
5235 
5236 	return 0;
5237 }
5238 
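/*
 * dpaa2_caam_enqueue - submit a prepared caam_request to the device
 *
 * If congestion notification is available, a congested state drops the
 * request with -EBUSY. Otherwise the frame list is wrapped in a FD and
 * enqueued on the current CPU's request FQ, retrying a bounded number of
 * times while the portal is busy. Returns -EINPROGRESS on success.
 */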
5239 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5240 {
5241 	struct dpaa2_fd fd;
5242 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5243 	struct dpaa2_caam_priv_per_cpu *ppriv;
5244 	int err = 0, i;
5245 
5246 	if (IS_ERR(req))
5247 		return PTR_ERR(req);
5248 
5249 	if (priv->cscn_mem) {
5250 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5251 					DPAA2_CSCN_SIZE,
5252 					DMA_FROM_DEVICE);
5253 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5254 			dev_dbg_ratelimited(dev, "Dropping request\n");
5255 			return -EBUSY;
5256 		}
5257 	}
5258 
5259 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5260 
5261 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5262 					 DMA_BIDIRECTIONAL);
5263 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5264 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
5266 	}
5267 
5268 	memset(&fd, 0, sizeof(fd));
5269 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5270 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5271 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5272 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5273 
5274 	ppriv = this_cpu_ptr(priv->ppriv);
5275 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5276 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5277 						  &fd);
5278 		if (err != -EBUSY)
5279 			break;
5280 
5281 		cpu_relax();
5282 	}
5283 
5284 	if (unlikely(err)) {
5285 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5286 		goto err_out;
5287 	}
5288 
5289 	return -EINPROGRESS;
5290 
5291 err_out:
5292 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5293 			 DMA_BIDIRECTIONAL);
5294 	return -EIO;
5295 }
5296 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5297 
5298 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5299 	{
5300 		.vendor = FSL_MC_VENDOR_FREESCALE,
5301 		.obj_type = "dpseci",
5302 	},
5303 	{ .vendor = 0x0 }
5304 };
5305 
5306 static struct fsl_mc_driver dpaa2_caam_driver = {
5307 	.driver = {
5308 		.name		= KBUILD_MODNAME,
5309 		.owner		= THIS_MODULE,
5310 	},
5311 	.probe		= dpaa2_caam_probe,
5312 	.remove		= dpaa2_caam_remove,
5313 	.match_id_table = dpaa2_caam_match_id_table
5314 };
5315 
5316 MODULE_LICENSE("Dual BSD/GPL");
5317 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5318 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5319 
5320 module_fsl_mc_driver(dpaa2_caam_driver);
5321