// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
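/*
 * Illustrative sketch only (not compiled): with the desc_constr.h
 * helpers used throughout this file, such a job descriptor is built
 * roughly as
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, nbytes, options);
 *
 * (the SEQ pointers are appended in whichever order the operation at
 * hand requires; see the ahash_* functions below).
 */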

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

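/*
 * buf_0/buf_1 in caam_hash_state form a ping-pong pair: bytes that do
 * not yet fill a whole block are staged in the alternate buffer, and
 * current_buf is flipped via switch_buf() once the job that consumed
 * the active buffer has completed.
 */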
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

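/*
 * (Re)build the four shared descriptors - update, update_first, final
 * and digest - and sync them to the device. This runs at tfm init time
 * and again from ahash_setkey() whenever the (split) key changes.
 */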
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		/* the digested key lands in key_out, so dump that buffer */
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

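/*
 * Standard HMAC treatment of over-long keys: ahash_setkey() runs keys
 * longer than the block size through hash_digest_key() first, so the
 * split-key machinery below only ever sees a key no longer than the
 * digest size.
 */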
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP (Derived Key Protocol) is supported - i.e. on CAAM Era 6
	 * and later - let the shared descriptor generate the split key
	 * itself instead of calling gen_split_key().
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
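/*
 * Note: sec4_sg[] is a variable-length trailing array; ahash_edesc_alloc()
 * below sizes the allocation as sizeof(*edesc) + sg_num * sizeof(struct
 * sec4_sg_entry), so the job descriptor and its link table share one
 * DMA-able allocation.
 */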

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

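/*
 * Four job ring completion callbacks follow, differing in where the
 * result lands (req->result vs. the running context in caam_ctx) and
 * in which DMA direction the context was mapped for the job.
 */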
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware
 * descriptor and space for a hardware scatter table with sg_num
 * entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

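/*
 * Reference the request source from the job descriptor: a single mapped
 * segment with no prepended entries is pointed to directly, anything
 * else goes through a sec4 S/G table whose first first_sg entries the
 * caller has already filled (e.g. running context and bounce buffer).
 */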
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

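/*
 * The update/finup/final hooks in caam_hash_state form a small state
 * machine: a request starts out on the no-context handlers set up here
 * and is switched over to the ctx-carrying variants once a first block
 * has been hashed and a running context exists in caam_ctx.
 */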
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

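/*
 * ahash_export()/ahash_import() above serialize the running context,
 * the active bounce buffer and the state-machine hooks - everything
 * needed to resume a request - which is why the templates below set
 * .statesize to sizeof(struct caam_export_state).
 */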
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 keep the full 32- and 64-byte internal
	 * state of SHA-256 and SHA-512 respectively, hence the raw sizes.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

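/*
 * Instantiate one algorithm from a template, either as the keyed
 * hmac(...) variant or as the plain digest, in which case .setkey is
 * cleared so the crypto API treats it as an unkeyed hash.
 */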
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");