xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision 2a598d0b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4  *
5  * Copyright 2011 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019, 2023 NXP
7  *
8  * Based on caamalg.c crypto API driver.
9  *
10  * relationship of digest job descriptor or first job descriptor after init to
11  * shared descriptors:
12  *
13  * ---------------                     ---------------
14  * | JobDesc #1  |-------------------->|  ShareDesc  |
15  * | *(packet 1) |                     |  (hashKey)  |
16  * ---------------                     | (operation) |
17  *                                     ---------------
18  *
19  * relationship of subsequent job descriptors to shared descriptors:
20  *
21  * ---------------                     ---------------
22  * | JobDesc #2  |-------------------->|  ShareDesc  |
23  * | *(packet 2) |      |------------->|  (hashKey)  |
24  * ---------------      |    |-------->| (operation) |
25  *       .              |    |         | (load ctx2) |
26  *       .              |    |         ---------------
27  * ---------------      |    |
28  * | JobDesc #3  |------|    |
29  * | *(packet 3) |           |
30  * ---------------           |
31  *       .                   |
32  *       .                   |
33  * ---------------           |
34  * | JobDesc #4  |------------
35  * | *(packet 4) |
36  * ---------------
37  *
38  * The SharedDesc never changes for a connection unless rekeyed, but
39  * each packet will likely be in a different place. So all we need
40  * to know to process the packet is where the input is, where the
41  * output goes, and what context we want to process with. Context is
42  * in the SharedDesc, packet references in the JobDesc.
43  *
44  * So, a job desc looks like:
45  *
46  * ---------------------
47  * | Header            |
48  * | ShareDesc Pointer |
49  * | SEQ_OUT_PTR       |
50  * | (output buffer)   |
51  * | (output length)   |
52  * | SEQ_IN_PTR        |
53  * | (input buffer)    |
54  * | (input length)    |
55  * ---------------------
56  */
57 
58 #include "compat.h"
59 
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 #include <crypto/engine.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/kernel.h>
71 
72 #define CAAM_CRA_PRIORITY		3000
73 
74 /* max hash key is max split key size */
75 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
76 
77 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
78 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
79 
80 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
81 					 CAAM_MAX_HASH_KEY_SIZE)
82 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
83 
84 /* caam context sizes for hashes: running digest + 8 */
85 #define HASH_MSG_LEN			8
86 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
87 
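/* registered CAAM ahash algorithms (filled at module init, walked at exit to unregister) */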
88 static struct list_head hash_list;
89 
90 /* ahash per-session context */
91 struct caam_hash_ctx {
92 	struct crypto_engine_ctx enginectx;
93 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
94 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
95 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
96 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
97 	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
98 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
99 	dma_addr_t sh_desc_update_first_dma;
100 	dma_addr_t sh_desc_fin_dma;
101 	dma_addr_t sh_desc_digest_dma;
102 	enum dma_data_direction dir;
103 	enum dma_data_direction key_dir;
104 	struct device *jrdev;
105 	int ctx_len;
106 	struct alginfo adata;
107 };
108 
109 /* ahash state */
110 struct caam_hash_state {
111 	dma_addr_t buf_dma;
112 	dma_addr_t ctx_dma;
113 	int ctx_dma_len;
114 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
115 	int buflen;
116 	int next_buflen;
117 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
118 	int (*update)(struct ahash_request *req) ____cacheline_aligned;
119 	int (*final)(struct ahash_request *req);
120 	int (*finup)(struct ahash_request *req);
121 	struct ahash_edesc *edesc;
122 	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
123 			      void *context);
124 };
125 
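/*
 * Partial hash state as exposed through the crypto API .export()/.import()
 * hooks; it mirrors the software-visible fields of struct caam_hash_state so
 * an in-progress hash can be suspended and later resumed.
 */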
126 struct caam_export_state {
127 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
128 	u8 caam_ctx[MAX_CTX_LEN];
129 	int buflen;
130 	int (*update)(struct ahash_request *req);
131 	int (*final)(struct ahash_request *req);
132 	int (*finup)(struct ahash_request *req);
133 };
134 
135 static inline bool is_cmac_aes(u32 algtype)
136 {
137 	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
138 	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
139 }
140 /* Common job descriptor seq in/out ptr routines */
141 
142 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
143 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
144 				      struct caam_hash_state *state,
145 				      int ctx_len)
146 {
147 	state->ctx_dma_len = ctx_len;
148 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
149 					ctx_len, DMA_FROM_DEVICE);
150 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
151 		dev_err(jrdev, "unable to map ctx\n");
152 		state->ctx_dma = 0;
153 		return -ENOMEM;
154 	}
155 
156 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
157 
158 	return 0;
159 }
160 
161 /* Map current buffer in state (if length > 0) and put it in link table */
162 static inline int buf_map_to_sec4_sg(struct device *jrdev,
163 				     struct sec4_sg_entry *sec4_sg,
164 				     struct caam_hash_state *state)
165 {
166 	int buflen = state->buflen;
167 
168 	if (!buflen)
169 		return 0;
170 
171 	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
172 					DMA_TO_DEVICE);
173 	if (dma_mapping_error(jrdev, state->buf_dma)) {
174 		dev_err(jrdev, "unable to map buf\n");
175 		state->buf_dma = 0;
176 		return -ENOMEM;
177 	}
178 
179 	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
180 
181 	return 0;
182 }
183 
184 /* Map state->caam_ctx, and add it to link table */
185 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
186 				     struct caam_hash_state *state, int ctx_len,
187 				     struct sec4_sg_entry *sec4_sg, u32 flag)
188 {
189 	state->ctx_dma_len = ctx_len;
190 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
191 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
192 		dev_err(jrdev, "unable to map ctx\n");
193 		state->ctx_dma = 0;
194 		return -ENOMEM;
195 	}
196 
197 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
198 
199 	return 0;
200 }
201 
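/*
 * (Re)build the four shared descriptors (update, update_first, final/finup,
 * digest) for MDHA-based hashes using the current (split) key in ctx->key,
 * and sync each of them to the device.
 */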
202 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
203 {
204 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
205 	int digestsize = crypto_ahash_digestsize(ahash);
206 	struct device *jrdev = ctx->jrdev;
207 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
208 	u32 *desc;
209 
210 	ctx->adata.key_virt = ctx->key;
211 
212 	/* ahash_update shared descriptor */
213 	desc = ctx->sh_desc_update;
214 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
215 			  ctx->ctx_len, true, ctrlpriv->era);
216 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
217 				   desc_bytes(desc), ctx->dir);
218 
219 	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
220 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
221 			     1);
222 
223 	/* ahash_update_first shared descriptor */
224 	desc = ctx->sh_desc_update_first;
225 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
226 			  ctx->ctx_len, false, ctrlpriv->era);
227 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
228 				   desc_bytes(desc), ctx->dir);
229 	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
230 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
231 			     desc_bytes(desc), 1);
232 
233 	/* ahash_final shared descriptor */
234 	desc = ctx->sh_desc_fin;
235 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
236 			  ctx->ctx_len, true, ctrlpriv->era);
237 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
238 				   desc_bytes(desc), ctx->dir);
239 
240 	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
241 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
242 			     desc_bytes(desc), 1);
243 
244 	/* ahash_digest shared descriptor */
245 	desc = ctx->sh_desc_digest;
246 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
247 			  ctx->ctx_len, false, ctrlpriv->era);
248 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
249 				   desc_bytes(desc), ctx->dir);
250 
251 	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
252 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
253 			     desc_bytes(desc), 1);
254 
255 	return 0;
256 }
257 
258 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
259 {
260 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
261 	int digestsize = crypto_ahash_digestsize(ahash);
262 	struct device *jrdev = ctx->jrdev;
263 	u32 *desc;
264 
265 	/* shared descriptor for ahash_update */
266 	desc = ctx->sh_desc_update;
267 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
268 			    ctx->ctx_len, ctx->ctx_len);
269 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
270 				   desc_bytes(desc), ctx->dir);
271 	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
272 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
273 			     1);
274 
275 	/* shared descriptor for ahash_{final,finup} */
276 	desc = ctx->sh_desc_fin;
277 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
278 			    digestsize, ctx->ctx_len);
279 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
280 				   desc_bytes(desc), ctx->dir);
281 	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
282 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
283 			     1);
284 
285 	/* key is immediate data for INIT and INITFINAL states */
286 	ctx->adata.key_virt = ctx->key;
287 
288 	/* shared descriptor for first invocation of ahash_update */
289 	desc = ctx->sh_desc_update_first;
290 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
291 			    ctx->ctx_len);
292 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
293 				   desc_bytes(desc), ctx->dir);
294 	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
295 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
296 			     desc_bytes(desc), 1);
297 
298 	/* shared descriptor for ahash_digest */
299 	desc = ctx->sh_desc_digest;
300 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
301 			    digestsize, ctx->ctx_len);
302 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
303 				   desc_bytes(desc), ctx->dir);
304 	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
305 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
306 			     1);
307 	return 0;
308 }
309 
310 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
311 {
312 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
313 	int digestsize = crypto_ahash_digestsize(ahash);
314 	struct device *jrdev = ctx->jrdev;
315 	u32 *desc;
316 
317 	/* shared descriptor for ahash_update */
318 	desc = ctx->sh_desc_update;
319 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
320 			    ctx->ctx_len, ctx->ctx_len);
321 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
322 				   desc_bytes(desc), ctx->dir);
323 	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
324 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
325 			     desc_bytes(desc), 1);
326 
327 	/* shared descriptor for ahash_{final,finup} */
328 	desc = ctx->sh_desc_fin;
329 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
330 			    digestsize, ctx->ctx_len);
331 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
332 				   desc_bytes(desc), ctx->dir);
333 	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
334 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
335 			     desc_bytes(desc), 1);
336 
337 	/* shared descriptor for first invocation of ahash_update */
338 	desc = ctx->sh_desc_update_first;
339 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
340 			    ctx->ctx_len);
341 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
342 				   desc_bytes(desc), ctx->dir);
343 	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
344 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
345 			     desc_bytes(desc), 1);
346 
347 	/* shared descriptor for ahash_digest */
348 	desc = ctx->sh_desc_digest;
349 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
350 			    digestsize, ctx->ctx_len);
351 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
352 				   desc_bytes(desc), ctx->dir);
353 	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
354 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
355 			     desc_bytes(desc), 1);
356 
357 	return 0;
358 }
359 
360 /* Digest the key if it is too long, shrinking it to digestsize bytes */
361 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
362 			   u32 digestsize)
363 {
364 	struct device *jrdev = ctx->jrdev;
365 	u32 *desc;
366 	struct split_key_result result;
367 	dma_addr_t key_dma;
368 	int ret;
369 
370 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
371 	if (!desc)
372 		return -ENOMEM;
373 
374 	init_job_desc(desc, 0);
375 
376 	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
377 	if (dma_mapping_error(jrdev, key_dma)) {
378 		dev_err(jrdev, "unable to map key memory\n");
379 		kfree(desc);
380 		return -ENOMEM;
381 	}
382 
383 	/* Job descriptor to perform unkeyed hash on key_in */
384 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
385 			 OP_ALG_AS_INITFINAL);
386 	append_seq_in_ptr(desc, key_dma, *keylen, 0);
387 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
388 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
389 	append_seq_out_ptr(desc, key_dma, digestsize, 0);
390 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
391 			 LDST_SRCDST_BYTE_CONTEXT);
392 
393 	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
394 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
395 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
396 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
397 			     1);
398 
399 	result.err = 0;
400 	init_completion(&result.completion);
401 
402 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
403 	if (ret == -EINPROGRESS) {
404 		/* in progress */
405 		wait_for_completion(&result.completion);
406 		ret = result.err;
407 
408 		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
409 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
410 				     digestsize, 1);
411 	}
412 	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
413 
414 	*keylen = digestsize;
415 
416 	kfree(desc);
417 
418 	return ret;
419 }
420 
421 static int ahash_setkey(struct crypto_ahash *ahash,
422 			const u8 *key, unsigned int keylen)
423 {
424 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
425 	struct device *jrdev = ctx->jrdev;
426 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
427 	int digestsize = crypto_ahash_digestsize(ahash);
428 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
429 	int ret;
430 	u8 *hashed_key = NULL;
431 
432 	dev_dbg(jrdev, "keylen %d\n", keylen);
433 
434 	if (keylen > blocksize) {
435 		unsigned int aligned_len =
436 			ALIGN(keylen, dma_get_cache_alignment());
437 
438 		if (aligned_len < keylen)
439 			return -EOVERFLOW;
440 
441 		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
442 		if (!hashed_key)
443 			return -ENOMEM;
444 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
445 		if (ret)
446 			goto bad_free_key;
447 		key = hashed_key;
448 	}
449 
450 	/*
451 	 * If DKP is supported, use it in the shared descriptor to generate
452 	 * the split key.
453 	 */
454 	if (ctrlpriv->era >= 6) {
455 		ctx->adata.key_inline = true;
456 		ctx->adata.keylen = keylen;
457 		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
458 						      OP_ALG_ALGSEL_MASK);
459 
460 		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
461 			goto bad_free_key;
462 
463 		memcpy(ctx->key, key, keylen);
464 
465 		/*
466 		 * In case |user key| > |derived key|, using DKP<imm,imm>
467 		 * would result in invalid opcodes (last bytes of user key) in
468 		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
469 		 * virtual and dma key addresses are needed.
470 		 */
471 		if (keylen > ctx->adata.keylen_pad)
472 			dma_sync_single_for_device(ctx->jrdev,
473 						   ctx->adata.key_dma,
474 						   ctx->adata.keylen_pad,
475 						   DMA_TO_DEVICE);
476 	} else {
477 		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
478 				    keylen, CAAM_MAX_HASH_KEY_SIZE);
479 		if (ret)
480 			goto bad_free_key;
481 	}
482 
483 	kfree(hashed_key);
484 	return ahash_set_sh_desc(ahash);
485  bad_free_key:
486 	kfree(hashed_key);
487 	return -EINVAL;
488 }
489 
490 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
491 			unsigned int keylen)
492 {
493 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
494 	struct device *jrdev = ctx->jrdev;
495 
496 	if (keylen != AES_KEYSIZE_128)
497 		return -EINVAL;
498 
499 	memcpy(ctx->key, key, keylen);
500 	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
501 				   DMA_TO_DEVICE);
502 	ctx->adata.keylen = keylen;
503 
504 	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
505 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
506 
507 	return axcbc_set_sh_desc(ahash);
508 }
509 
510 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
511 			unsigned int keylen)
512 {
513 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
514 	int err;
515 
516 	err = aes_check_keylen(keylen);
517 	if (err)
518 		return err;
519 
520 	/* key is immediate data for all cmac shared descriptors */
521 	ctx->adata.key_virt = key;
522 	ctx->adata.keylen = keylen;
523 
524 	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
525 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
526 
527 	return acmac_set_sh_desc(ahash);
528 }
529 
530 /*
531  * ahash_edesc - s/w-extended ahash descriptor
532  * @sec4_sg_dma: physical mapped address of h/w link table
533  * @src_nents: number of segments in input scatterlist
534  * @sec4_sg_bytes: length of dma mapped sec4_sg space
535  * @bklog: stored to determine if the request needs backlog
536  * @hw_desc: the h/w job descriptor followed by any referenced link tables
537  * @sec4_sg: h/w link table
538  */
539 struct ahash_edesc {
540 	dma_addr_t sec4_sg_dma;
541 	int src_nents;
542 	int sec4_sg_bytes;
543 	bool bklog;
544 	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
545 	struct sec4_sg_entry sec4_sg[];
546 };
547 
548 static inline void ahash_unmap(struct device *dev,
549 			struct ahash_edesc *edesc,
550 			struct ahash_request *req, int dst_len)
551 {
552 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
553 
554 	if (edesc->src_nents)
555 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
556 
557 	if (edesc->sec4_sg_bytes)
558 		dma_unmap_single(dev, edesc->sec4_sg_dma,
559 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
560 
561 	if (state->buf_dma) {
562 		dma_unmap_single(dev, state->buf_dma, state->buflen,
563 				 DMA_TO_DEVICE);
564 		state->buf_dma = 0;
565 	}
566 }
567 
568 static inline void ahash_unmap_ctx(struct device *dev,
569 			struct ahash_edesc *edesc,
570 			struct ahash_request *req, int dst_len, u32 flag)
571 {
572 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
573 
574 	if (state->ctx_dma) {
575 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
576 		state->ctx_dma = 0;
577 	}
578 	ahash_unmap(dev, edesc, req, dst_len);
579 }
580 
581 static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
582 				  void *context, enum dma_data_direction dir)
583 {
584 	struct ahash_request *req = context;
585 	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
586 	struct ahash_edesc *edesc;
587 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
588 	int digestsize = crypto_ahash_digestsize(ahash);
589 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
590 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
591 	int ecode = 0;
592 	bool has_bklog;
593 
594 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
595 
596 	edesc = state->edesc;
597 	has_bklog = edesc->bklog;
598 
599 	if (err)
600 		ecode = caam_jr_strstatus(jrdev, err);
601 
602 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
603 	memcpy(req->result, state->caam_ctx, digestsize);
604 	kfree(edesc);
605 
606 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
607 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
608 			     ctx->ctx_len, 1);
609 
610 	/*
611 	 * If no backlog flag, the completion of the request is done
612 	 * by CAAM, not crypto engine.
613 	 */
614 	if (!has_bklog)
615 		ahash_request_complete(req, ecode);
616 	else
617 		crypto_finalize_hash_request(jrp->engine, req, ecode);
618 }
619 
620 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
621 		       void *context)
622 {
623 	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
624 }
625 
626 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
627 			       void *context)
628 {
629 	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
630 }
631 
632 static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
633 				     void *context, enum dma_data_direction dir)
634 {
635 	struct ahash_request *req = context;
636 	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
637 	struct ahash_edesc *edesc;
638 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
639 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
640 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
641 	int digestsize = crypto_ahash_digestsize(ahash);
642 	int ecode = 0;
643 	bool has_bklog;
644 
645 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
646 
647 	edesc = state->edesc;
648 	has_bklog = edesc->bklog;
649 	if (err)
650 		ecode = caam_jr_strstatus(jrdev, err);
651 
652 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
653 	kfree(edesc);
654 
655 	scatterwalk_map_and_copy(state->buf, req->src,
656 				 req->nbytes - state->next_buflen,
657 				 state->next_buflen, 0);
658 	state->buflen = state->next_buflen;
659 
660 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
661 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
662 			     state->buflen, 1);
663 
664 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
665 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
666 			     ctx->ctx_len, 1);
667 	if (req->result)
668 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
669 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
670 				     digestsize, 1);
671 
672 	/*
673 	 * If no backlog flag, the completion of the request is done
674 	 * by CAAM, not crypto engine.
675 	 */
676 	if (!has_bklog)
677 		ahash_request_complete(req, ecode);
678 	else
679 		crypto_finalize_hash_request(jrp->engine, req, ecode);
680 
681 }
682 
683 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
684 			  void *context)
685 {
686 	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
687 }
688 
689 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
690 			       void *context)
691 {
692 	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
693 }
694 
695 /*
696  * Allocate an extended descriptor, which contains the hardware descriptor
697  * and space for hardware scatter table containing sg_num entries.
698  */
699 static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
700 					     int sg_num, u32 *sh_desc,
701 					     dma_addr_t sh_desc_dma)
702 {
703 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
704 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
705 		       GFP_KERNEL : GFP_ATOMIC;
706 	struct ahash_edesc *edesc;
707 
708 	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
709 	if (!edesc)
710 		return NULL;
711 
712 	state->edesc = edesc;
713 
714 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
715 			     HDR_SHARE_DEFER | HDR_REVERSE);
716 
717 	return edesc;
718 }
719 
720 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
721 			       struct ahash_edesc *edesc,
722 			       struct ahash_request *req, int nents,
723 			       unsigned int first_sg,
724 			       unsigned int first_bytes, size_t to_hash)
725 {
726 	dma_addr_t src_dma;
727 	u32 options;
728 
729 	if (nents > 1 || first_sg) {
730 		struct sec4_sg_entry *sg = edesc->sec4_sg;
731 		unsigned int sgsize = sizeof(*sg) *
732 				      pad_sg_nents(first_sg + nents);
733 
734 		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
735 
736 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
737 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
738 			dev_err(ctx->jrdev, "unable to map S/G table\n");
739 			return -ENOMEM;
740 		}
741 
742 		edesc->sec4_sg_bytes = sgsize;
743 		edesc->sec4_sg_dma = src_dma;
744 		options = LDST_SGF;
745 	} else {
746 		src_dma = sg_dma_address(req->src);
747 		options = 0;
748 	}
749 
750 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
751 			  options);
752 
753 	return 0;
754 }
755 
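/*
 * crypto-engine callback: submit a previously backlogged request to the job
 * ring; -ENOSPC is propagated when the engine supports retrying.
 */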
756 static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
757 {
758 	struct ahash_request *req = ahash_request_cast(areq);
759 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
760 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
761 	struct device *jrdev = ctx->jrdev;
762 	u32 *desc = state->edesc->hw_desc;
763 	int ret;
764 
765 	state->edesc->bklog = true;
766 
767 	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
768 
769 	if (ret == -ENOSPC && engine->retry_support)
770 		return ret;
771 
772 	if (ret != -EINPROGRESS) {
773 		ahash_unmap(jrdev, state->edesc, req, 0);
774 		kfree(state->edesc);
775 	} else {
776 		ret = 0;
777 	}
778 
779 	return ret;
780 }
781 
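/*
 * Common submission path: send the job straight to the job ring, or via
 * crypto-engine when the request may be backlogged; on failure undo the DMA
 * mappings and free the extended descriptor.
 */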
782 static int ahash_enqueue_req(struct device *jrdev,
783 			     void (*cbk)(struct device *jrdev, u32 *desc,
784 					 u32 err, void *context),
785 			     struct ahash_request *req,
786 			     int dst_len, enum dma_data_direction dir)
787 {
788 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
789 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
790 	struct ahash_edesc *edesc = state->edesc;
791 	u32 *desc = edesc->hw_desc;
792 	int ret;
793 
794 	state->ahash_op_done = cbk;
795 
796 	/*
797 	 * Only backlog requests are sent to crypto-engine since the others
798 	 * can be handled by CAAM, if free, especially since JR has up to 1024
799 	 * entries (more than the 10 entries from crypto-engine).
800 	 */
801 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
802 		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
803 							     req);
804 	else
805 		ret = caam_jr_enqueue(jrdev, desc, cbk, req);
806 
807 	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
808 		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
809 		kfree(edesc);
810 	}
811 
812 	return ret;
813 }
814 
815 /* submit update job descriptor */
816 static int ahash_update_ctx(struct ahash_request *req)
817 {
818 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
819 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
820 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
821 	struct device *jrdev = ctx->jrdev;
822 	u8 *buf = state->buf;
823 	int *buflen = &state->buflen;
824 	int *next_buflen = &state->next_buflen;
825 	int blocksize = crypto_ahash_blocksize(ahash);
826 	int in_len = *buflen + req->nbytes, to_hash;
827 	u32 *desc;
828 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
829 	struct ahash_edesc *edesc;
830 	int ret = 0;
831 
832 	*next_buflen = in_len & (blocksize - 1);
833 	to_hash = in_len - *next_buflen;
834 
835 	/*
836 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
837 	 * keep the last block in the internal buffer
838 	 */
839 	if ((is_xcbc_aes(ctx->adata.algtype) ||
840 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
841 	     (*next_buflen == 0)) {
842 		*next_buflen = blocksize;
843 		to_hash -= blocksize;
844 	}
845 
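	/*
	 * Worked example (plain hash): with blocksize 64, a 10-byte carry-over
	 * in buf (*buflen == 10) and req->nbytes == 100, in_len is 110, so
	 * *next_buflen = 110 & 63 = 46 and to_hash = 64; one full block is
	 * hashed now and 46 bytes are kept for the next call.
	 */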
846 	if (to_hash) {
847 		int pad_nents;
848 		int src_len = req->nbytes - *next_buflen;
849 
850 		src_nents = sg_nents_for_len(req->src, src_len);
851 		if (src_nents < 0) {
852 			dev_err(jrdev, "Invalid number of src SG.\n");
853 			return src_nents;
854 		}
855 
856 		if (src_nents) {
857 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
858 						  DMA_TO_DEVICE);
859 			if (!mapped_nents) {
860 				dev_err(jrdev, "unable to DMA map source\n");
861 				return -ENOMEM;
862 			}
863 		} else {
864 			mapped_nents = 0;
865 		}
866 
867 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
868 		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
869 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
870 
871 		/*
872 		 * allocate space for base edesc and hw desc commands,
873 		 * link tables
874 		 */
875 		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
876 					  ctx->sh_desc_update_dma);
877 		if (!edesc) {
878 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
879 			return -ENOMEM;
880 		}
881 
882 		edesc->src_nents = src_nents;
883 		edesc->sec4_sg_bytes = sec4_sg_bytes;
884 
885 		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
886 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
887 		if (ret)
888 			goto unmap_ctx;
889 
890 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
891 		if (ret)
892 			goto unmap_ctx;
893 
894 		if (mapped_nents)
895 			sg_to_sec4_sg_last(req->src, src_len,
896 					   edesc->sec4_sg + sec4_sg_src_index,
897 					   0);
898 		else
899 			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
900 					    1);
901 
902 		desc = edesc->hw_desc;
903 
904 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
905 						     sec4_sg_bytes,
906 						     DMA_TO_DEVICE);
907 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
908 			dev_err(jrdev, "unable to map S/G table\n");
909 			ret = -ENOMEM;
910 			goto unmap_ctx;
911 		}
912 
913 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
914 				       to_hash, LDST_SGF);
915 
916 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
917 
918 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
919 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
920 				     desc_bytes(desc), 1);
921 
922 		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
923 					ctx->ctx_len, DMA_BIDIRECTIONAL);
924 	} else if (*next_buflen) {
925 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
926 					 req->nbytes, 0);
927 		*buflen = *next_buflen;
928 
929 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
930 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
931 				     *buflen, 1);
932 	}
933 
934 	return ret;
935 unmap_ctx:
936 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
937 	kfree(edesc);
938 	return ret;
939 }
940 
941 static int ahash_final_ctx(struct ahash_request *req)
942 {
943 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
944 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
945 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
946 	struct device *jrdev = ctx->jrdev;
947 	int buflen = state->buflen;
948 	u32 *desc;
949 	int sec4_sg_bytes;
950 	int digestsize = crypto_ahash_digestsize(ahash);
951 	struct ahash_edesc *edesc;
952 	int ret;
953 
954 	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
955 			sizeof(struct sec4_sg_entry);
956 
957 	/* allocate space for base edesc and hw desc commands, link tables */
958 	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
959 				  ctx->sh_desc_fin_dma);
960 	if (!edesc)
961 		return -ENOMEM;
962 
963 	desc = edesc->hw_desc;
964 
965 	edesc->sec4_sg_bytes = sec4_sg_bytes;
966 
967 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
968 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
969 	if (ret)
970 		goto unmap_ctx;
971 
972 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
973 	if (ret)
974 		goto unmap_ctx;
975 
976 	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
977 
978 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
979 					    sec4_sg_bytes, DMA_TO_DEVICE);
980 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
981 		dev_err(jrdev, "unable to map S/G table\n");
982 		ret = -ENOMEM;
983 		goto unmap_ctx;
984 	}
985 
986 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
987 			  LDST_SGF);
988 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
989 
990 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
991 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
992 			     1);
993 
994 	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
995 				 digestsize, DMA_BIDIRECTIONAL);
996  unmap_ctx:
997 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
998 	kfree(edesc);
999 	return ret;
1000 }
1001 
1002 static int ahash_finup_ctx(struct ahash_request *req)
1003 {
1004 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1005 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1006 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1007 	struct device *jrdev = ctx->jrdev;
1008 	int buflen = state->buflen;
1009 	u32 *desc;
1010 	int sec4_sg_src_index;
1011 	int src_nents, mapped_nents;
1012 	int digestsize = crypto_ahash_digestsize(ahash);
1013 	struct ahash_edesc *edesc;
1014 	int ret;
1015 
1016 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1017 	if (src_nents < 0) {
1018 		dev_err(jrdev, "Invalid number of src SG.\n");
1019 		return src_nents;
1020 	}
1021 
1022 	if (src_nents) {
1023 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1024 					  DMA_TO_DEVICE);
1025 		if (!mapped_nents) {
1026 			dev_err(jrdev, "unable to DMA map source\n");
1027 			return -ENOMEM;
1028 		}
1029 	} else {
1030 		mapped_nents = 0;
1031 	}
1032 
1033 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1034 
1035 	/* allocate space for base edesc and hw desc commands, link tables */
1036 	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1037 				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
1038 	if (!edesc) {
1039 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1040 		return -ENOMEM;
1041 	}
1042 
1043 	desc = edesc->hw_desc;
1044 
1045 	edesc->src_nents = src_nents;
1046 
1047 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1048 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1049 	if (ret)
1050 		goto unmap_ctx;
1051 
1052 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1053 	if (ret)
1054 		goto unmap_ctx;
1055 
1056 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1057 				  sec4_sg_src_index, ctx->ctx_len + buflen,
1058 				  req->nbytes);
1059 	if (ret)
1060 		goto unmap_ctx;
1061 
1062 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1063 
1064 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1065 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1066 			     1);
1067 
1068 	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
1069 				 digestsize, DMA_BIDIRECTIONAL);
1070  unmap_ctx:
1071 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1072 	kfree(edesc);
1073 	return ret;
1074 }
1075 
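/* hash an entire request in a single job, using the INITFINAL shared descriptor */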
1076 static int ahash_digest(struct ahash_request *req)
1077 {
1078 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1079 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1080 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1081 	struct device *jrdev = ctx->jrdev;
1082 	u32 *desc;
1083 	int digestsize = crypto_ahash_digestsize(ahash);
1084 	int src_nents, mapped_nents;
1085 	struct ahash_edesc *edesc;
1086 	int ret;
1087 
1088 	state->buf_dma = 0;
1089 
1090 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1091 	if (src_nents < 0) {
1092 		dev_err(jrdev, "Invalid number of src SG.\n");
1093 		return src_nents;
1094 	}
1095 
1096 	if (src_nents) {
1097 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1098 					  DMA_TO_DEVICE);
1099 		if (!mapped_nents) {
1100 			dev_err(jrdev, "unable to map source for DMA\n");
1101 			return -ENOMEM;
1102 		}
1103 	} else {
1104 		mapped_nents = 0;
1105 	}
1106 
1107 	/* allocate space for base edesc and hw desc commands, link tables */
1108 	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
1109 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1110 	if (!edesc) {
1111 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1112 		return -ENOMEM;
1113 	}
1114 
1115 	edesc->src_nents = src_nents;
1116 
1117 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1118 				  req->nbytes);
1119 	if (ret) {
1120 		ahash_unmap(jrdev, edesc, req, digestsize);
1121 		kfree(edesc);
1122 		return ret;
1123 	}
1124 
1125 	desc = edesc->hw_desc;
1126 
1127 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1128 	if (ret) {
1129 		ahash_unmap(jrdev, edesc, req, digestsize);
1130 		kfree(edesc);
1131 		return -ENOMEM;
1132 	}
1133 
1134 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1135 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1136 			     1);
1137 
1138 	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
1139 				 DMA_FROM_DEVICE);
1140 }
1141 
1142 /* submit ahash final if it is the first job descriptor */
1143 static int ahash_final_no_ctx(struct ahash_request *req)
1144 {
1145 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1146 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1147 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1148 	struct device *jrdev = ctx->jrdev;
1149 	u8 *buf = state->buf;
1150 	int buflen = state->buflen;
1151 	u32 *desc;
1152 	int digestsize = crypto_ahash_digestsize(ahash);
1153 	struct ahash_edesc *edesc;
1154 	int ret;
1155 
1156 	/* allocate space for base edesc and hw desc commands, link tables */
1157 	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
1158 				  ctx->sh_desc_digest_dma);
1159 	if (!edesc)
1160 		return -ENOMEM;
1161 
1162 	desc = edesc->hw_desc;
1163 
1164 	if (buflen) {
1165 		state->buf_dma = dma_map_single(jrdev, buf, buflen,
1166 						DMA_TO_DEVICE);
1167 		if (dma_mapping_error(jrdev, state->buf_dma)) {
1168 			dev_err(jrdev, "unable to map src\n");
1169 			goto unmap;
1170 		}
1171 
1172 		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1173 	}
1174 
1175 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1176 	if (ret)
1177 		goto unmap;
1178 
1179 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1180 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1181 			     1);
1182 
1183 	return ahash_enqueue_req(jrdev, ahash_done, req,
1184 				 digestsize, DMA_FROM_DEVICE);
1185  unmap:
1186 	ahash_unmap(jrdev, edesc, req, digestsize);
1187 	kfree(edesc);
1188 	return -ENOMEM;
1189 }
1190 
1191 /* submit ahash update if it is the first job descriptor (previous updates only buffered data) */
1192 static int ahash_update_no_ctx(struct ahash_request *req)
1193 {
1194 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1195 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1196 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1197 	struct device *jrdev = ctx->jrdev;
1198 	u8 *buf = state->buf;
1199 	int *buflen = &state->buflen;
1200 	int *next_buflen = &state->next_buflen;
1201 	int blocksize = crypto_ahash_blocksize(ahash);
1202 	int in_len = *buflen + req->nbytes, to_hash;
1203 	int sec4_sg_bytes, src_nents, mapped_nents;
1204 	struct ahash_edesc *edesc;
1205 	u32 *desc;
1206 	int ret = 0;
1207 
1208 	*next_buflen = in_len & (blocksize - 1);
1209 	to_hash = in_len - *next_buflen;
1210 
1211 	/*
1212 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1213 	 * keep the last block in the internal buffer
1214 	 */
1215 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1216 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1217 	     (*next_buflen == 0)) {
1218 		*next_buflen = blocksize;
1219 		to_hash -= blocksize;
1220 	}
1221 
1222 	if (to_hash) {
1223 		int pad_nents;
1224 		int src_len = req->nbytes - *next_buflen;
1225 
1226 		src_nents = sg_nents_for_len(req->src, src_len);
1227 		if (src_nents < 0) {
1228 			dev_err(jrdev, "Invalid number of src SG.\n");
1229 			return src_nents;
1230 		}
1231 
1232 		if (src_nents) {
1233 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1234 						  DMA_TO_DEVICE);
1235 			if (!mapped_nents) {
1236 				dev_err(jrdev, "unable to DMA map source\n");
1237 				return -ENOMEM;
1238 			}
1239 		} else {
1240 			mapped_nents = 0;
1241 		}
1242 
1243 		pad_nents = pad_sg_nents(1 + mapped_nents);
1244 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1245 
1246 		/*
1247 		 * allocate space for base edesc and hw desc commands,
1248 		 * link tables
1249 		 */
1250 		edesc = ahash_edesc_alloc(req, pad_nents,
1251 					  ctx->sh_desc_update_first,
1252 					  ctx->sh_desc_update_first_dma);
1253 		if (!edesc) {
1254 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1255 			return -ENOMEM;
1256 		}
1257 
1258 		edesc->src_nents = src_nents;
1259 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1260 
1261 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1262 		if (ret)
1263 			goto unmap_ctx;
1264 
1265 		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1266 
1267 		desc = edesc->hw_desc;
1268 
1269 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1270 						    sec4_sg_bytes,
1271 						    DMA_TO_DEVICE);
1272 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1273 			dev_err(jrdev, "unable to map S/G table\n");
1274 			ret = -ENOMEM;
1275 			goto unmap_ctx;
1276 		}
1277 
1278 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1279 
1280 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1281 		if (ret)
1282 			goto unmap_ctx;
1283 
1284 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1285 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1286 				     desc_bytes(desc), 1);
1287 
1288 		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1289 					ctx->ctx_len, DMA_TO_DEVICE);
1290 		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1291 			return ret;
1292 		state->update = ahash_update_ctx;
1293 		state->finup = ahash_finup_ctx;
1294 		state->final = ahash_final_ctx;
1295 	} else if (*next_buflen) {
1296 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1297 					 req->nbytes, 0);
1298 		*buflen = *next_buflen;
1299 
1300 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1301 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
1302 				     *buflen, 1);
1303 	}
1304 
1305 	return ret;
1306  unmap_ctx:
1307 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1308 	kfree(edesc);
1309 	return ret;
1310 }
1311 
1312 /* submit ahash finup if it is the first job descriptor (previous updates only buffered data) */
1313 static int ahash_finup_no_ctx(struct ahash_request *req)
1314 {
1315 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1316 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1317 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1318 	struct device *jrdev = ctx->jrdev;
1319 	int buflen = state->buflen;
1320 	u32 *desc;
1321 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1322 	int digestsize = crypto_ahash_digestsize(ahash);
1323 	struct ahash_edesc *edesc;
1324 	int ret;
1325 
1326 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1327 	if (src_nents < 0) {
1328 		dev_err(jrdev, "Invalid number of src SG.\n");
1329 		return src_nents;
1330 	}
1331 
1332 	if (src_nents) {
1333 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1334 					  DMA_TO_DEVICE);
1335 		if (!mapped_nents) {
1336 			dev_err(jrdev, "unable to DMA map source\n");
1337 			return -ENOMEM;
1338 		}
1339 	} else {
1340 		mapped_nents = 0;
1341 	}
1342 
1343 	sec4_sg_src_index = 2;
1344 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1345 			 sizeof(struct sec4_sg_entry);
1346 
1347 	/* allocate space for base edesc and hw desc commands, link tables */
1348 	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1349 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1350 	if (!edesc) {
1351 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1352 		return -ENOMEM;
1353 	}
1354 
1355 	desc = edesc->hw_desc;
1356 
1357 	edesc->src_nents = src_nents;
1358 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1359 
1360 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1361 	if (ret)
1362 		goto unmap;
1363 
1364 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1365 				  req->nbytes);
1366 	if (ret) {
1367 		dev_err(jrdev, "unable to map S/G table\n");
1368 		goto unmap;
1369 	}
1370 
1371 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1372 	if (ret)
1373 		goto unmap;
1374 
1375 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1376 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1377 			     1);
1378 
1379 	return ahash_enqueue_req(jrdev, ahash_done, req,
1380 				 digestsize, DMA_FROM_DEVICE);
1381  unmap:
1382 	ahash_unmap(jrdev, edesc, req, digestsize);
1383 	kfree(edesc);
1384 	return -ENOMEM;
1385 
1386 }
1387 
1388 /* submit first update job descriptor after init */
1389 static int ahash_update_first(struct ahash_request *req)
1390 {
1391 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1392 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1393 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1394 	struct device *jrdev = ctx->jrdev;
1395 	u8 *buf = state->buf;
1396 	int *buflen = &state->buflen;
1397 	int *next_buflen = &state->next_buflen;
1398 	int to_hash;
1399 	int blocksize = crypto_ahash_blocksize(ahash);
1400 	u32 *desc;
1401 	int src_nents, mapped_nents;
1402 	struct ahash_edesc *edesc;
1403 	int ret = 0;
1404 
1405 	*next_buflen = req->nbytes & (blocksize - 1);
1406 	to_hash = req->nbytes - *next_buflen;
1407 
1408 	/*
1409 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1410 	 * keep the last block in the internal buffer
1411 	 */
1412 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1413 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1414 	     (*next_buflen == 0)) {
1415 		*next_buflen = blocksize;
1416 		to_hash -= blocksize;
1417 	}
1418 
1419 	if (to_hash) {
1420 		src_nents = sg_nents_for_len(req->src,
1421 					     req->nbytes - *next_buflen);
1422 		if (src_nents < 0) {
1423 			dev_err(jrdev, "Invalid number of src SG.\n");
1424 			return src_nents;
1425 		}
1426 
1427 		if (src_nents) {
1428 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1429 						  DMA_TO_DEVICE);
1430 			if (!mapped_nents) {
1431 				dev_err(jrdev, "unable to map source for DMA\n");
1432 				return -ENOMEM;
1433 			}
1434 		} else {
1435 			mapped_nents = 0;
1436 		}
1437 
1438 		/*
1439 		 * allocate space for base edesc and hw desc commands,
1440 		 * link tables
1441 		 */
1442 		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1443 					  mapped_nents : 0,
1444 					  ctx->sh_desc_update_first,
1445 					  ctx->sh_desc_update_first_dma);
1446 		if (!edesc) {
1447 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1448 			return -ENOMEM;
1449 		}
1450 
1451 		edesc->src_nents = src_nents;
1452 
1453 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1454 					  to_hash);
1455 		if (ret)
1456 			goto unmap_ctx;
1457 
1458 		desc = edesc->hw_desc;
1459 
1460 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1461 		if (ret)
1462 			goto unmap_ctx;
1463 
1464 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1465 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1466 				     desc_bytes(desc), 1);
1467 
1468 		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1469 					ctx->ctx_len, DMA_TO_DEVICE);
1470 		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1471 			return ret;
1472 		state->update = ahash_update_ctx;
1473 		state->finup = ahash_finup_ctx;
1474 		state->final = ahash_final_ctx;
1475 	} else if (*next_buflen) {
1476 		state->update = ahash_update_no_ctx;
1477 		state->finup = ahash_finup_no_ctx;
1478 		state->final = ahash_final_no_ctx;
1479 		scatterwalk_map_and_copy(buf, req->src, 0,
1480 					 req->nbytes, 0);
1481 		*buflen = *next_buflen;
1482 
1483 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1484 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
1485 				     *buflen, 1);
1486 	}
1487 
1488 	return ret;
1489  unmap_ctx:
1490 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1491 	kfree(edesc);
1492 	return ret;
1493 }
1494 
1495 static int ahash_finup_first(struct ahash_request *req)
1496 {
1497 	return ahash_digest(req);
1498 }
1499 
1500 static int ahash_init(struct ahash_request *req)
1501 {
1502 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1503 
1504 	state->update = ahash_update_first;
1505 	state->finup = ahash_finup_first;
1506 	state->final = ahash_final_no_ctx;
1507 
1508 	state->ctx_dma = 0;
1509 	state->ctx_dma_len = 0;
1510 	state->buf_dma = 0;
1511 	state->buflen = 0;
1512 	state->next_buflen = 0;
1513 
1514 	return 0;
1515 }
1516 
1517 static int ahash_update(struct ahash_request *req)
1518 {
1519 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1520 
1521 	return state->update(req);
1522 }
1523 
1524 static int ahash_finup(struct ahash_request *req)
1525 {
1526 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1527 
1528 	return state->finup(req);
1529 }
1530 
1531 static int ahash_final(struct ahash_request *req)
1532 {
1533 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1534 
1535 	return state->final(req);
1536 }
1537 
1538 static int ahash_export(struct ahash_request *req, void *out)
1539 {
1540 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1541 	struct caam_export_state *export = out;
1542 	u8 *buf = state->buf;
1543 	int len = state->buflen;
1544 
1545 	memcpy(export->buf, buf, len);
1546 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1547 	export->buflen = len;
1548 	export->update = state->update;
1549 	export->final = state->final;
1550 	export->finup = state->finup;
1551 
1552 	return 0;
1553 }
1554 
1555 static int ahash_import(struct ahash_request *req, const void *in)
1556 {
1557 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
1558 	const struct caam_export_state *export = in;
1559 
1560 	memset(state, 0, sizeof(*state));
1561 	memcpy(state->buf, export->buf, export->buflen);
1562 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1563 	state->buflen = export->buflen;
1564 	state->update = export->update;
1565 	state->final = export->final;
1566 	state->finup = export->finup;
1567 
1568 	return 0;
1569 }
1570 
1571 struct caam_hash_template {
1572 	char name[CRYPTO_MAX_ALG_NAME];
1573 	char driver_name[CRYPTO_MAX_ALG_NAME];
1574 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1575 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1576 	unsigned int blocksize;
1577 	struct ahash_alg template_ahash;
1578 	u32 alg_type;
1579 };
1580 
1581 /* ahash descriptors */
1582 static struct caam_hash_template driver_hash[] = {
1583 	{
1584 		.name = "sha1",
1585 		.driver_name = "sha1-caam",
1586 		.hmac_name = "hmac(sha1)",
1587 		.hmac_driver_name = "hmac-sha1-caam",
1588 		.blocksize = SHA1_BLOCK_SIZE,
1589 		.template_ahash = {
1590 			.init = ahash_init,
1591 			.update = ahash_update,
1592 			.final = ahash_final,
1593 			.finup = ahash_finup,
1594 			.digest = ahash_digest,
1595 			.export = ahash_export,
1596 			.import = ahash_import,
1597 			.setkey = ahash_setkey,
1598 			.halg = {
1599 				.digestsize = SHA1_DIGEST_SIZE,
1600 				.statesize = sizeof(struct caam_export_state),
1601 			},
1602 		},
1603 		.alg_type = OP_ALG_ALGSEL_SHA1,
1604 	}, {
1605 		.name = "sha224",
1606 		.driver_name = "sha224-caam",
1607 		.hmac_name = "hmac(sha224)",
1608 		.hmac_driver_name = "hmac-sha224-caam",
1609 		.blocksize = SHA224_BLOCK_SIZE,
1610 		.template_ahash = {
1611 			.init = ahash_init,
1612 			.update = ahash_update,
1613 			.final = ahash_final,
1614 			.finup = ahash_finup,
1615 			.digest = ahash_digest,
1616 			.export = ahash_export,
1617 			.import = ahash_import,
1618 			.setkey = ahash_setkey,
1619 			.halg = {
1620 				.digestsize = SHA224_DIGEST_SIZE,
1621 				.statesize = sizeof(struct caam_export_state),
1622 			},
1623 		},
1624 		.alg_type = OP_ALG_ALGSEL_SHA224,
1625 	}, {
1626 		.name = "sha256",
1627 		.driver_name = "sha256-caam",
1628 		.hmac_name = "hmac(sha256)",
1629 		.hmac_driver_name = "hmac-sha256-caam",
1630 		.blocksize = SHA256_BLOCK_SIZE,
1631 		.template_ahash = {
1632 			.init = ahash_init,
1633 			.update = ahash_update,
1634 			.final = ahash_final,
1635 			.finup = ahash_finup,
1636 			.digest = ahash_digest,
1637 			.export = ahash_export,
1638 			.import = ahash_import,
1639 			.setkey = ahash_setkey,
1640 			.halg = {
1641 				.digestsize = SHA256_DIGEST_SIZE,
1642 				.statesize = sizeof(struct caam_export_state),
1643 			},
1644 		},
1645 		.alg_type = OP_ALG_ALGSEL_SHA256,
1646 	}, {
1647 		.name = "sha384",
1648 		.driver_name = "sha384-caam",
1649 		.hmac_name = "hmac(sha384)",
1650 		.hmac_driver_name = "hmac-sha384-caam",
1651 		.blocksize = SHA384_BLOCK_SIZE,
1652 		.template_ahash = {
1653 			.init = ahash_init,
1654 			.update = ahash_update,
1655 			.final = ahash_final,
1656 			.finup = ahash_finup,
1657 			.digest = ahash_digest,
1658 			.export = ahash_export,
1659 			.import = ahash_import,
1660 			.setkey = ahash_setkey,
1661 			.halg = {
1662 				.digestsize = SHA384_DIGEST_SIZE,
1663 				.statesize = sizeof(struct caam_export_state),
1664 			},
1665 		},
1666 		.alg_type = OP_ALG_ALGSEL_SHA384,
1667 	}, {
1668 		.name = "sha512",
1669 		.driver_name = "sha512-caam",
1670 		.hmac_name = "hmac(sha512)",
1671 		.hmac_driver_name = "hmac-sha512-caam",
1672 		.blocksize = SHA512_BLOCK_SIZE,
1673 		.template_ahash = {
1674 			.init = ahash_init,
1675 			.update = ahash_update,
1676 			.final = ahash_final,
1677 			.finup = ahash_finup,
1678 			.digest = ahash_digest,
1679 			.export = ahash_export,
1680 			.import = ahash_import,
1681 			.setkey = ahash_setkey,
1682 			.halg = {
1683 				.digestsize = SHA512_DIGEST_SIZE,
1684 				.statesize = sizeof(struct caam_export_state),
1685 			},
1686 		},
1687 		.alg_type = OP_ALG_ALGSEL_SHA512,
1688 	}, {
1689 		.name = "md5",
1690 		.driver_name = "md5-caam",
1691 		.hmac_name = "hmac(md5)",
1692 		.hmac_driver_name = "hmac-md5-caam",
1693 		.blocksize = MD5_BLOCK_WORDS * 4,
1694 		.template_ahash = {
1695 			.init = ahash_init,
1696 			.update = ahash_update,
1697 			.final = ahash_final,
1698 			.finup = ahash_finup,
1699 			.digest = ahash_digest,
1700 			.export = ahash_export,
1701 			.import = ahash_import,
1702 			.setkey = ahash_setkey,
1703 			.halg = {
1704 				.digestsize = MD5_DIGEST_SIZE,
1705 				.statesize = sizeof(struct caam_export_state),
1706 			},
1707 		},
1708 		.alg_type = OP_ALG_ALGSEL_MD5,
1709 	}, {
1710 		.hmac_name = "xcbc(aes)",
1711 		.hmac_driver_name = "xcbc-aes-caam",
1712 		.blocksize = AES_BLOCK_SIZE,
1713 		.template_ahash = {
1714 			.init = ahash_init,
1715 			.update = ahash_update,
1716 			.final = ahash_final,
1717 			.finup = ahash_finup,
1718 			.digest = ahash_digest,
1719 			.export = ahash_export,
1720 			.import = ahash_import,
1721 			.setkey = axcbc_setkey,
1722 			.halg = {
1723 				.digestsize = AES_BLOCK_SIZE,
1724 				.statesize = sizeof(struct caam_export_state),
1725 			},
1726 		 },
1727 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1728 	}, {
1729 		.hmac_name = "cmac(aes)",
1730 		.hmac_driver_name = "cmac-aes-caam",
1731 		.blocksize = AES_BLOCK_SIZE,
1732 		.template_ahash = {
1733 			.init = ahash_init,
1734 			.update = ahash_update,
1735 			.final = ahash_final,
1736 			.finup = ahash_finup,
1737 			.digest = ahash_digest,
1738 			.export = ahash_export,
1739 			.import = ahash_import,
1740 			.setkey = acmac_setkey,
1741 			.halg = {
1742 				.digestsize = AES_BLOCK_SIZE,
1743 				.statesize = sizeof(struct caam_export_state),
1744 			},
1745 		},
1746 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1747 	},
1748 };
1749 
1750 struct caam_hash_alg {
1751 	struct list_head entry;
1752 	int alg_type;
1753 	struct ahash_alg ahash_alg;
1754 };
1755 
1756 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1757 {
1758 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1759 	struct crypto_alg *base = tfm->__crt_alg;
1760 	struct hash_alg_common *halg =
1761 		 container_of(base, struct hash_alg_common, base);
1762 	struct ahash_alg *alg =
1763 		 container_of(halg, struct ahash_alg, halg);
1764 	struct caam_hash_alg *caam_hash =
1765 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1766 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1767 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1768 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 carry the full SHA-256 and SHA-512 internal
	 * state (32 and 64 bytes), hence the literal sizes below.
	 */
1769 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1770 					 HASH_MSG_LEN + 32,
1771 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1772 					 HASH_MSG_LEN + 64,
1773 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1774 	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1775 						      sh_desc_update);
1776 	dma_addr_t dma_addr;
1777 	struct caam_drv_private *priv;
1778 
1779 	/*
1780 	 * Get a Job ring from the Job Ring driver to ensure in-order
1781 	 * processing of crypto requests for this tfm
1782 	 */
1783 	ctx->jrdev = caam_jr_alloc();
1784 	if (IS_ERR(ctx->jrdev)) {
1785 		pr_err("Job Ring Device allocation for transform failed\n");
1786 		return PTR_ERR(ctx->jrdev);
1787 	}
1788 
1789 	priv = dev_get_drvdata(ctx->jrdev->parent);
1790 
1791 	if (is_xcbc_aes(caam_hash->alg_type)) {
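	/*
	 * Per-algorithm setup: DMA direction for the shared descriptors,
	 * DMA direction for the key buffer (DMA_NONE when the key is never
	 * DMA-mapped), and the running-context length CAAM carries between
	 * job descriptors.
	 */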
1792 		ctx->dir = DMA_TO_DEVICE;
1793 		ctx->key_dir = DMA_BIDIRECTIONAL;
1794 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1795 		ctx->ctx_len = 48;
1796 	} else if (is_cmac_aes(caam_hash->alg_type)) {
1797 		ctx->dir = DMA_TO_DEVICE;
1798 		ctx->key_dir = DMA_NONE;
1799 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1800 		ctx->ctx_len = 32;
1801 	} else {
1802 		if (priv->era >= 6) {
1803 			ctx->dir = DMA_BIDIRECTIONAL;
1804 			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1805 		} else {
1806 			ctx->dir = DMA_TO_DEVICE;
1807 			ctx->key_dir = DMA_NONE;
1808 		}
1809 		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1810 		ctx->ctx_len = runninglen[(ctx->adata.algtype &
1811 					   OP_ALG_ALGSEL_SUBMASK) >>
1812 					  OP_ALG_ALGSEL_SHIFT];
1813 	}
1814 
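	/*
	 * Premap the key buffer once per tfm; DMA_ATTR_SKIP_CPU_SYNC defers
	 * cache maintenance until the key contents are actually written.
	 */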
1815 	if (ctx->key_dir != DMA_NONE) {
1816 		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1817 							  ARRAY_SIZE(ctx->key),
1818 							  ctx->key_dir,
1819 							  DMA_ATTR_SKIP_CPU_SYNC);
1820 		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1821 			dev_err(ctx->jrdev, "unable to map key\n");
1822 			caam_jr_free(ctx->jrdev);
1823 			return -ENOMEM;
1824 		}
1825 	}
1826 
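	/*
	 * Map the four shared descriptors (update, update_first, fin,
	 * digest) as one contiguous DMA region ending just before the key,
	 * then derive each descriptor's DMA address from its offset.
	 */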
1827 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1828 					offsetof(struct caam_hash_ctx, key) -
1829 					sh_desc_update_offset,
1830 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1831 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1832 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1833 
1834 		if (ctx->key_dir != DMA_NONE)
1835 			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1836 					       ARRAY_SIZE(ctx->key),
1837 					       ctx->key_dir,
1838 					       DMA_ATTR_SKIP_CPU_SYNC);
1839 
1840 		caam_jr_free(ctx->jrdev);
1841 		return -ENOMEM;
1842 	}
1843 
1844 	ctx->sh_desc_update_dma = dma_addr;
1845 	ctx->sh_desc_update_first_dma = dma_addr +
1846 					offsetof(struct caam_hash_ctx,
1847 						 sh_desc_update_first) -
1848 					sh_desc_update_offset;
1849 	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1850 						   sh_desc_fin) -
1851 					sh_desc_update_offset;
1852 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1853 						      sh_desc_digest) -
1854 					sh_desc_update_offset;
1855 
1856 	ctx->enginectx.op.do_one_request = ahash_do_one_req;
1857 
1858 	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
1859 
1860 	/*
1861 	 * For keyed hash algorithms, the shared descriptors are created
1862 	 * later, in the setkey() callback
1863 	 */
1864 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1865 }
1866 
1867 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1868 {
1869 	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
1870 
1871 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1872 			       offsetof(struct caam_hash_ctx, key) -
1873 			       offsetof(struct caam_hash_ctx, sh_desc_update),
1874 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1875 	if (ctx->key_dir != DMA_NONE)
1876 		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1877 				       ARRAY_SIZE(ctx->key), ctx->key_dir,
1878 				       DMA_ATTR_SKIP_CPU_SYNC);
1879 	caam_jr_free(ctx->jrdev);
1880 }
1881 
1882 void caam_algapi_hash_exit(void)
1883 {
1884 	struct caam_hash_alg *t_alg, *n;
1885 
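	/* hash_list is initialized only if caam_algapi_hash_init() ran */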
1886 	if (!hash_list.next)
1887 		return;
1888 
1889 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1890 		crypto_unregister_ahash(&t_alg->ahash_alg);
1891 		list_del(&t_alg->entry);
1892 		kfree(t_alg);
1893 	}
1894 }
1895 
1896 static struct caam_hash_alg *
1897 caam_hash_alloc(struct caam_hash_template *template,
1898 		bool keyed)
1899 {
1900 	struct caam_hash_alg *t_alg;
1901 	struct ahash_alg *halg;
1902 	struct crypto_alg *alg;
1903 
1904 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1905 	if (!t_alg)
1906 		return ERR_PTR(-ENOMEM);
1907 
1908 	t_alg->ahash_alg = template->template_ahash;
1909 	halg = &t_alg->ahash_alg;
1910 	alg = &halg->halg.base;
1911 
1912 	if (keyed) {
1913 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1914 			 template->hmac_name);
1915 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1916 			 template->hmac_driver_name);
1917 	} else {
1918 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1919 			 template->name);
1920 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1921 			 template->driver_name);
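		/* Unkeyed variants must not advertise a setkey() method */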
1922 		t_alg->ahash_alg.setkey = NULL;
1923 	}
1924 	alg->cra_module = THIS_MODULE;
1925 	alg->cra_init = caam_hash_cra_init;
1926 	alg->cra_exit = caam_hash_cra_exit;
1927 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
1928 	alg->cra_priority = CAAM_CRA_PRIORITY;
1929 	alg->cra_blocksize = template->blocksize;
1930 	alg->cra_alignmask = 0;
1931 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
1932 
1933 	t_alg->alg_type = template->alg_type;
1934 
1935 	return t_alg;
1936 }
1937 
1938 int caam_algapi_hash_init(struct device *ctrldev)
1939 {
1940 	int i = 0, err = 0;
1941 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1942 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1943 	u32 md_inst, md_vid;
1944 
1945 	/*
1946 	 * Register crypto algorithms the device supports.  First, identify
1947 	 * the presence and attributes of the MD block.
1948 	 */
1949 	if (priv->era < 10) {
1950 		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
1951 
1952 		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
1953 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1954 		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
1955 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1956 	} else {
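		/* Era 10+ exposes per-CHA version registers (vreg) */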
1957 		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
1958 
1959 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1960 		md_inst = mdha & CHA_VER_NUM_MASK;
1961 	}
1962 
1963 	/*
1964 	 * Skip registration of any hashing algorithms if the MD block
1965 	 * is not present.
1966 	 */
1967 	if (!md_inst)
1968 		return 0;
1969 
1970 	/* Limit digest size based on LP256 */
1971 	/* The low-power LP256 MD block supports digests only up to SHA-256 */
1972 		md_limit = SHA256_DIGEST_SIZE;
1973 
1974 	INIT_LIST_HEAD(&hash_list);
1975 
1976 	/* register crypto algorithms the device supports */
1977 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1978 		struct caam_hash_alg *t_alg;
1979 		struct caam_hash_template *alg = driver_hash + i;
1980 
1981 		/* If MD size is not supported by device, skip registration */
1982 		if (is_mdha(alg->alg_type) &&
1983 		    alg->template_ahash.halg.digestsize > md_limit)
1984 			continue;
1985 
1986 		/* register hmac version */
1987 		t_alg = caam_hash_alloc(alg, true);
1988 		if (IS_ERR(t_alg)) {
1989 			err = PTR_ERR(t_alg);
1990 			pr_warn("%s alg allocation failed\n",
1991 				alg->hmac_driver_name);
1992 			continue;
1993 		}
1994 
1995 		err = crypto_register_ahash(&t_alg->ahash_alg);
1996 		if (err) {
1997 			pr_warn("%s alg registration failed: %d\n",
1998 				t_alg->ahash_alg.halg.base.cra_driver_name,
1999 				err);
2000 			kfree(t_alg);
2001 		} else
2002 			list_add_tail(&t_alg->entry, &hash_list);
2003 
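		/* AES-based MACs (xcbc, cmac) have no unkeyed variant */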
2004 		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2005 			continue;
2006 
2007 		/* register unkeyed version */
2008 		t_alg = caam_hash_alloc(alg, false);
2009 		if (IS_ERR(t_alg)) {
2010 			err = PTR_ERR(t_alg);
2011 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2012 			continue;
2013 		}
2014 
2015 		err = crypto_register_ahash(&t_alg->ahash_alg);
2016 		if (err) {
2017 			pr_warn("%s alg registration failed: %d\n",
2018 				t_alg->ahash_alg.halg.base.cra_driver_name,
2019 				err);
2020 			kfree(t_alg);
2021 		} else
2022 			list_add_tail(&t_alg->entry, &hash_list);
2023 	}
2024 
2025 	return err;
2026 }
2027