// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
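
/*
 * For orientation only -- not part of this driver: a generic crypto API
 * consumer would drive the handlers below roughly like this (sketch,
 * assuming the "sha256-caam" instance registered by this file; tfm, req,
 * src_sg, done_cb and done_ctx are caller-owned placeholders and error
 * handling is elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, done_ctx);
 *	ahash_request_set_crypt(req, src_sg, digest, src_len);
 *	crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() lands in ahash_digest() below; the incremental
 * init()/update()/final() entry points map onto ahash_init(),
 * ahash_update() and ahash_final() the same way.
 */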

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the hash key if it is too large, i.e. longer than the block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

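/*
 * Sketch (not part of this driver): the keyed transforms above are used
 * through the same generic interface, with crypto_ahash_setkey() called
 * before any digest/update, e.g. for the "cmac-aes-caam" instance:
 *
 *	crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
 *	ahash_request_set_crypt(req, src_sg, mac, src_len);
 *	crypto_ahash_digest(req);
 *
 * tfm, req, src_sg and mac are caller-owned placeholders as in the
 * example near the top of this file.
 */
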
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlogged requests are sent to the crypto engine; the others
	 * can be handled by CAAM directly when it is free, especially since
	 * the JR has up to 1024 entries (more than the 10 entries of the
	 * crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

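		/*
		 * Link table layout from here on: entry 0 holds the running
		 * context, entry 1 (if present) the internal buffer, and the
		 * remaining entries the source scatterlist.
		 */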
		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
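
/*
 * Sketch (not part of this driver): export/import let a caller suspend a
 * partially hashed stream and resume it later, possibly on a different
 * request (req and req2 are caller-owned placeholders):
 *
 *	char state[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req2, state);
 *	crypto_ahash_final(req2);
 *
 * The statesize advertised below guarantees that a buffer of that size
 * is large enough.
 */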
1566 
1567 struct caam_hash_template {
1568 	char name[CRYPTO_MAX_ALG_NAME];
1569 	char driver_name[CRYPTO_MAX_ALG_NAME];
1570 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1571 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1572 	unsigned int blocksize;
1573 	struct ahash_alg template_ahash;
1574 	u32 alg_type;
1575 };
1576 
1577 /* ahash descriptors */
1578 static struct caam_hash_template driver_hash[] = {
1579 	{
1580 		.name = "sha1",
1581 		.driver_name = "sha1-caam",
1582 		.hmac_name = "hmac(sha1)",
1583 		.hmac_driver_name = "hmac-sha1-caam",
1584 		.blocksize = SHA1_BLOCK_SIZE,
1585 		.template_ahash = {
1586 			.init = ahash_init,
1587 			.update = ahash_update,
1588 			.final = ahash_final,
1589 			.finup = ahash_finup,
1590 			.digest = ahash_digest,
1591 			.export = ahash_export,
1592 			.import = ahash_import,
1593 			.setkey = ahash_setkey,
1594 			.halg = {
1595 				.digestsize = SHA1_DIGEST_SIZE,
1596 				.statesize = sizeof(struct caam_export_state),
1597 			},
1598 		},
1599 		.alg_type = OP_ALG_ALGSEL_SHA1,
1600 	}, {
1601 		.name = "sha224",
1602 		.driver_name = "sha224-caam",
1603 		.hmac_name = "hmac(sha224)",
1604 		.hmac_driver_name = "hmac-sha224-caam",
1605 		.blocksize = SHA224_BLOCK_SIZE,
1606 		.template_ahash = {
1607 			.init = ahash_init,
1608 			.update = ahash_update,
1609 			.final = ahash_final,
1610 			.finup = ahash_finup,
1611 			.digest = ahash_digest,
1612 			.export = ahash_export,
1613 			.import = ahash_import,
1614 			.setkey = ahash_setkey,
1615 			.halg = {
1616 				.digestsize = SHA224_DIGEST_SIZE,
1617 				.statesize = sizeof(struct caam_export_state),
1618 			},
1619 		},
1620 		.alg_type = OP_ALG_ALGSEL_SHA224,
1621 	}, {
1622 		.name = "sha256",
1623 		.driver_name = "sha256-caam",
1624 		.hmac_name = "hmac(sha256)",
1625 		.hmac_driver_name = "hmac-sha256-caam",
1626 		.blocksize = SHA256_BLOCK_SIZE,
1627 		.template_ahash = {
1628 			.init = ahash_init,
1629 			.update = ahash_update,
1630 			.final = ahash_final,
1631 			.finup = ahash_finup,
1632 			.digest = ahash_digest,
1633 			.export = ahash_export,
1634 			.import = ahash_import,
1635 			.setkey = ahash_setkey,
1636 			.halg = {
1637 				.digestsize = SHA256_DIGEST_SIZE,
1638 				.statesize = sizeof(struct caam_export_state),
1639 			},
1640 		},
1641 		.alg_type = OP_ALG_ALGSEL_SHA256,
1642 	}, {
1643 		.name = "sha384",
1644 		.driver_name = "sha384-caam",
1645 		.hmac_name = "hmac(sha384)",
1646 		.hmac_driver_name = "hmac-sha384-caam",
1647 		.blocksize = SHA384_BLOCK_SIZE,
1648 		.template_ahash = {
1649 			.init = ahash_init,
1650 			.update = ahash_update,
1651 			.final = ahash_final,
1652 			.finup = ahash_finup,
1653 			.digest = ahash_digest,
1654 			.export = ahash_export,
1655 			.import = ahash_import,
1656 			.setkey = ahash_setkey,
1657 			.halg = {
1658 				.digestsize = SHA384_DIGEST_SIZE,
1659 				.statesize = sizeof(struct caam_export_state),
1660 			},
1661 		},
1662 		.alg_type = OP_ALG_ALGSEL_SHA384,
1663 	}, {
1664 		.name = "sha512",
1665 		.driver_name = "sha512-caam",
1666 		.hmac_name = "hmac(sha512)",
1667 		.hmac_driver_name = "hmac-sha512-caam",
1668 		.blocksize = SHA512_BLOCK_SIZE,
1669 		.template_ahash = {
1670 			.init = ahash_init,
1671 			.update = ahash_update,
1672 			.final = ahash_final,
1673 			.finup = ahash_finup,
1674 			.digest = ahash_digest,
1675 			.export = ahash_export,
1676 			.import = ahash_import,
1677 			.setkey = ahash_setkey,
1678 			.halg = {
1679 				.digestsize = SHA512_DIGEST_SIZE,
1680 				.statesize = sizeof(struct caam_export_state),
1681 			},
1682 		},
1683 		.alg_type = OP_ALG_ALGSEL_SHA512,
1684 	}, {
1685 		.name = "md5",
1686 		.driver_name = "md5-caam",
1687 		.hmac_name = "hmac(md5)",
1688 		.hmac_driver_name = "hmac-md5-caam",
1689 		.blocksize = MD5_BLOCK_WORDS * 4,
1690 		.template_ahash = {
1691 			.init = ahash_init,
1692 			.update = ahash_update,
1693 			.final = ahash_final,
1694 			.finup = ahash_finup,
1695 			.digest = ahash_digest,
1696 			.export = ahash_export,
1697 			.import = ahash_import,
1698 			.setkey = ahash_setkey,
1699 			.halg = {
1700 				.digestsize = MD5_DIGEST_SIZE,
1701 				.statesize = sizeof(struct caam_export_state),
1702 			},
1703 		},
1704 		.alg_type = OP_ALG_ALGSEL_MD5,
1705 	}, {
1706 		.hmac_name = "xcbc(aes)",
1707 		.hmac_driver_name = "xcbc-aes-caam",
1708 		.blocksize = AES_BLOCK_SIZE,
1709 		.template_ahash = {
1710 			.init = ahash_init,
1711 			.update = ahash_update,
1712 			.final = ahash_final,
1713 			.finup = ahash_finup,
1714 			.digest = ahash_digest,
1715 			.export = ahash_export,
1716 			.import = ahash_import,
1717 			.setkey = axcbc_setkey,
1718 			.halg = {
1719 				.digestsize = AES_BLOCK_SIZE,
1720 				.statesize = sizeof(struct caam_export_state),
1721 			},
1722 		 },
1723 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1724 	}, {
1725 		.hmac_name = "cmac(aes)",
1726 		.hmac_driver_name = "cmac-aes-caam",
1727 		.blocksize = AES_BLOCK_SIZE,
1728 		.template_ahash = {
1729 			.init = ahash_init,
1730 			.update = ahash_update,
1731 			.final = ahash_final,
1732 			.finup = ahash_finup,
1733 			.digest = ahash_digest,
1734 			.export = ahash_export,
1735 			.import = ahash_import,
1736 			.setkey = acmac_setkey,
1737 			.halg = {
1738 				.digestsize = AES_BLOCK_SIZE,
1739 				.statesize = sizeof(struct caam_export_state),
1740 			},
1741 		},
1742 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1743 	},
1744 };
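
/*
 * Each template entry above describes up to two algorithms: a keyed
 * variant named by .hmac_name/.hmac_driver_name and, when .name is set,
 * an unkeyed variant named by .name/.driver_name.  The AES MAC entries
 * (xcbc, cmac) omit .name on purpose, so only their keyed form is ever
 * registered (see the OP_ALG_ALGSEL_AES check in caam_algapi_hash_init()
 * below).  Illustrative expansion for the sha256 entry:
 *
 *	keyed:   cra_name "hmac(sha256)", cra_driver_name "hmac-sha256-caam"
 *	unkeyed: cra_name "sha256",       cra_driver_name "sha256-caam"
 */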
1745 
1746 struct caam_hash_alg {
1747 	struct list_head entry;
1748 	int alg_type;
1749 	struct ahash_alg ahash_alg;
1750 };
1751 
1752 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1753 {
1754 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1755 	struct crypto_alg *base = tfm->__crt_alg;
1756 	struct hash_alg_common *halg =
1757 		 container_of(base, struct hash_alg_common, base);
1758 	struct ahash_alg *alg =
1759 		 container_of(halg, struct ahash_alg, halg);
1760 	struct caam_hash_alg *caam_hash =
1761 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1762 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1763 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncated variants that keep the full
	 * internal state of SHA-256 and SHA-512, so their running-digest
	 * entries use the larger parent sizes.
	 */
1764 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1765 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1766 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1767 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1768 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
1769 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1770 	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1771 						      sh_desc_update);
1772 	dma_addr_t dma_addr;
1773 	struct caam_drv_private *priv;
1774 
1775 	/*
1776 	 * Get a Job ring from Job Ring driver to ensure in-order
1777 	 * crypto request processing per tfm
1778 	 */
1779 	ctx->jrdev = caam_jr_alloc();
1780 	if (IS_ERR(ctx->jrdev)) {
1781 		pr_err("Job Ring Device allocation for transform failed\n");
1782 		return PTR_ERR(ctx->jrdev);
1783 	}
1784 
1785 	priv = dev_get_drvdata(ctx->jrdev->parent);
1786 
1787 	if (is_xcbc_aes(caam_hash->alg_type)) {
1788 		ctx->dir = DMA_TO_DEVICE;
1789 		ctx->key_dir = DMA_BIDIRECTIONAL;
1790 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1791 		ctx->ctx_len = 48;
1792 	} else if (is_cmac_aes(caam_hash->alg_type)) {
1793 		ctx->dir = DMA_TO_DEVICE;
1794 		ctx->key_dir = DMA_NONE;
1795 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1796 		ctx->ctx_len = 32;
1797 	} else {
1798 		if (priv->era >= 6) {
1799 			ctx->dir = DMA_BIDIRECTIONAL;
1800 			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1801 		} else {
1802 			ctx->dir = DMA_TO_DEVICE;
1803 			ctx->key_dir = DMA_NONE;
1804 		}
1805 		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1806 		ctx->ctx_len = runninglen[(ctx->adata.algtype &
1807 					   OP_ALG_ALGSEL_SUBMASK) >>
1808 					  OP_ALG_ALGSEL_SHIFT];
1809 	}
1810 
1811 	if (ctx->key_dir != DMA_NONE) {
1812 		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1813 							  sizeof(ctx->key),
1814 							  ctx->key_dir,
1815 							  DMA_ATTR_SKIP_CPU_SYNC);
1816 		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1817 			dev_err(ctx->jrdev, "unable to map key\n");
1818 			caam_jr_free(ctx->jrdev);
1819 			return -ENOMEM;
1820 		}
1821 	}
1822 
1823 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1824 					offsetof(struct caam_hash_ctx, key) -
1825 					sh_desc_update_offset,
1826 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1827 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1828 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1829 
1830 		if (ctx->key_dir != DMA_NONE)
1831 			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1832 					       sizeof(ctx->key),
1833 					       ctx->key_dir,
1834 					       DMA_ATTR_SKIP_CPU_SYNC);
1835 
1836 		caam_jr_free(ctx->jrdev);
1837 		return -ENOMEM;
1838 	}
1839 
1840 	ctx->sh_desc_update_dma = dma_addr;
1841 	ctx->sh_desc_update_first_dma = dma_addr +
1842 					offsetof(struct caam_hash_ctx,
1843 						 sh_desc_update_first) -
1844 					sh_desc_update_offset;
1845 	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1846 						   sh_desc_fin) -
1847 					sh_desc_update_offset;
1848 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1849 						      sh_desc_digest) -
1850 					sh_desc_update_offset;
1851 
1852 	ctx->enginectx.op.do_one_request = ahash_do_one_req;
1853 
1854 	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1856 
1857 	/*
1858 	 * For keyed hash algorithms shared descriptors
1859 	 * will be created later in setkey() callback
1860 	 */
1861 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1862 }
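
/*
 * The shared descriptors live in one contiguous stretch of
 * struct caam_hash_ctx, so caam_hash_cra_init() maps them with a single
 * dma_map_single_attrs() call and derives each descriptor's bus address
 * from the base mapping via offsetof() deltas.  A self-contained sketch
 * of the same arithmetic (hypothetical struct and names, not driver
 * code; error handling elided):
 *
 *	struct demo_ctx {
 *		u32 desc_a[8];
 *		u32 desc_b[8];
 *		u8 key[16];	// deliberately excluded from the mapping
 *	};
 *
 *	size_t map_len = offsetof(struct demo_ctx, key) -
 *			 offsetof(struct demo_ctx, desc_a);
 *	dma_addr_t base = dma_map_single(dev, demo->desc_a, map_len,
 *					 DMA_TO_DEVICE);
 *	dma_addr_t desc_b_dma = base + offsetof(struct demo_ctx, desc_b) -
 *				offsetof(struct demo_ctx, desc_a);
 */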
1863 
1864 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1865 {
1866 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1867 
1868 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1869 			       offsetof(struct caam_hash_ctx, key) -
1870 			       offsetof(struct caam_hash_ctx, sh_desc_update),
1871 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1872 	if (ctx->key_dir != DMA_NONE)
1873 		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1874 				       sizeof(ctx->key), ctx->key_dir,
1875 				       DMA_ATTR_SKIP_CPU_SYNC);
1876 	caam_jr_free(ctx->jrdev);
1877 }
1878 
1879 void caam_algapi_hash_exit(void)
1880 {
1881 	struct caam_hash_alg *t_alg, *n;
1882 
	/* hash_list is only ever initialized by caam_algapi_hash_init() */
1883 	if (!hash_list.next)
1884 		return;
1885 
1886 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1887 		crypto_unregister_ahash(&t_alg->ahash_alg);
1888 		list_del(&t_alg->entry);
1889 		kfree(t_alg);
1890 	}
1891 }
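
/*
 * list_for_each_entry_safe() is required above because list_del() removes
 * the node the cursor stands on; the non-_safe iterator would then step
 * through freed memory.  Sketch of the pattern with a hypothetical node
 * type and 'head' list:
 *
 *	struct node { struct list_head entry; };
 *	struct node *pos, *tmp;
 *
 *	list_for_each_entry_safe(pos, tmp, &head, entry) {
 *		list_del(&pos->entry);	// safe: tmp already points past pos
 *		kfree(pos);
 *	}
 */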
1892 
1893 static struct caam_hash_alg *
1894 caam_hash_alloc(struct caam_hash_template *template,
1895 		bool keyed)
1896 {
1897 	struct caam_hash_alg *t_alg;
1898 	struct ahash_alg *halg;
1899 	struct crypto_alg *alg;
1900 
1901 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1902 	if (!t_alg) {
1903 		pr_err("failed to allocate t_alg\n");
1904 		return ERR_PTR(-ENOMEM);
1905 	}
1906 
1907 	t_alg->ahash_alg = template->template_ahash;
1908 	halg = &t_alg->ahash_alg;
1909 	alg = &halg->halg.base;
1910 
1911 	if (keyed) {
1912 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1913 			 template->hmac_name);
1914 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1915 			 template->hmac_driver_name);
1916 	} else {
1917 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1918 			 template->name);
1919 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1920 			 template->driver_name);
1921 		t_alg->ahash_alg.setkey = NULL;
1922 	}
1923 	alg->cra_module = THIS_MODULE;
1924 	alg->cra_init = caam_hash_cra_init;
1925 	alg->cra_exit = caam_hash_cra_exit;
1926 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1927 	alg->cra_priority = CAAM_CRA_PRIORITY;
1928 	alg->cra_blocksize = template->blocksize;
1929 	alg->cra_alignmask = 0;
1930 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
1931 
1932 	t_alg->alg_type = template->alg_type;
1933 
1934 	return t_alg;
1935 }
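
/*
 * Once registered, these algorithms are reachable through the generic
 * ahash API by cra_name; CAAM_CRA_PRIORITY makes them win over software
 * implementations.  Minimal consumer sketch (hypothetical 'key'/'data'
 * buffers, which must be DMA-able, i.e. kmalloc'd rather than on the
 * stack; error handling elided):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 *digest;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	digest = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */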
1936 
1937 int caam_algapi_hash_init(struct device *ctrldev)
1938 {
1939 	int i, err = 0;
1940 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1941 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1942 	u32 md_inst, md_vid;
1943 
1944 	/*
1945 	 * Register crypto algorithms the device supports.  First, identify
1946 	 * presence and attributes of MD block.
1947 	 */
1948 	if (priv->era < 10) {
1949 		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1950 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1951 		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1952 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1953 	} else {
1954 		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1955 
1956 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1957 		md_inst = mdha & CHA_VER_NUM_MASK;
1958 	}
1959 
1960 	/*
1961 	 * Skip registration of any hashing algorithms if MD block
1962 	 * is not present.
1963 	 */
1964 	if (!md_inst)
1965 		return 0;
1966 
1967 	/* Limit digest size based on LP256 */
1968 	if (md_vid == CHA_VER_VID_MD_LP256)
1969 		md_limit = SHA256_DIGEST_SIZE;
1970 
1971 	INIT_LIST_HEAD(&hash_list);
1972 
1973 	/* walk the template table, registering keyed and unkeyed variants */
1974 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1975 		struct caam_hash_alg *t_alg;
1976 		struct caam_hash_template *alg = driver_hash + i;
1977 
1978 		/* If MD size is not supported by device, skip registration */
1979 		if (is_mdha(alg->alg_type) &&
1980 		    alg->template_ahash.halg.digestsize > md_limit)
1981 			continue;
1982 
1983 		/* register hmac version */
1984 		t_alg = caam_hash_alloc(alg, true);
1985 		if (IS_ERR(t_alg)) {
1986 			err = PTR_ERR(t_alg);
1987 			pr_warn("%s alg allocation failed\n",
1988 				alg->hmac_driver_name);
1989 			continue;
1990 		}
1991 
1992 		err = crypto_register_ahash(&t_alg->ahash_alg);
1993 		if (err) {
1994 			pr_warn("%s alg registration failed: %d\n",
1995 				t_alg->ahash_alg.halg.base.cra_driver_name,
1996 				err);
1997 			kfree(t_alg);
1998 		} else {
1999 			list_add_tail(&t_alg->entry, &hash_list);
		}
2000 
2001 		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2002 			continue;
2003 
2004 		/* register unkeyed version */
2005 		t_alg = caam_hash_alloc(alg, false);
2006 		if (IS_ERR(t_alg)) {
2007 			err = PTR_ERR(t_alg);
2008 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2009 			continue;
2010 		}
2011 
2012 		err = crypto_register_ahash(&t_alg->ahash_alg);
2013 		if (err) {
2014 			pr_warn("%s alg registration failed: %d\n",
2015 				t_alg->ahash_alg.halg.base.cra_driver_name,
2016 				err);
2017 			kfree(t_alg);
2018 		} else {
2019 			list_add_tail(&t_alg->entry, &hash_list);
		}
2020 	}
2021 
2022 	return err;
2023 }
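
/*
 * Worked example of the MD-limit gate above: on an MDHA with
 * md_vid == CHA_VER_VID_MD_LP256, md_limit is SHA256_DIGEST_SIZE (32).
 * For the "sha384" template, is_mdha() is true and its digestsize (48)
 * exceeds 32, so the entry is skipped in both its hmac and unkeyed
 * forms; "sha256" (32 <= 32) is registered.  The AES MACs bypass the
 * check entirely because is_mdha() is false for them.
 */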
2024