xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision bc13c69e)
1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
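
/*
 * Illustrative sketch, not part of the driver: a job descriptor of the
 * shape above is built with the desc_constr.h helpers, mirroring what
 * ahash_digest() below does:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 */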
55 
56 #include "compat.h"
57 
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65 
66 #define CAAM_CRA_PRIORITY		3000
67 
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
70 
71 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
73 
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81 
82 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
83 					 CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85 
86 /* caam context sizes for hashes: running digest + 8-byte message length */
87 #define HASH_MSG_LEN			8
88 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
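/* For illustration: sha256's running context is HASH_MSG_LEN + 32 = 40 bytes */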
89 
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96 
97 
98 static struct list_head hash_list;
99 
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
103 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
104 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
105 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
106 	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
107 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
108 	dma_addr_t sh_desc_update_first_dma;
109 	dma_addr_t sh_desc_fin_dma;
110 	dma_addr_t sh_desc_digest_dma;
111 	dma_addr_t sh_desc_finup_dma;
112 	struct device *jrdev;
113 	u32 alg_type;
114 	u32 alg_op;
115 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
116 	dma_addr_t key_dma;
117 	int ctx_len;
118 	unsigned int split_key_len;
119 	unsigned int split_key_pad_len;
120 };
121 
122 /* ahash state */
123 struct caam_hash_state {
124 	dma_addr_t buf_dma;
125 	dma_addr_t ctx_dma;
126 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127 	int buflen_0;
128 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129 	int buflen_1;
130 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
131 	int (*update)(struct ahash_request *req);
132 	int (*final)(struct ahash_request *req);
133 	int (*finup)(struct ahash_request *req);
134 	int current_buf;
135 };
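
/*
 * Note, illustrative only: buf_0/buf_1 are used ping-pong style, with
 * current_buf selecting the active buffer, as in the update paths below:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 */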
136 
137 struct caam_export_state {
138 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
139 	u8 caam_ctx[MAX_CTX_LEN];
140 	int buflen;
141 	int (*update)(struct ahash_request *req);
142 	int (*final)(struct ahash_request *req);
143 	int (*finup)(struct ahash_request *req);
144 };
145 
146 /* Common job descriptor seq in/out ptr routines */
147 
148 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
149 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
150 				      struct caam_hash_state *state,
151 				      int ctx_len)
152 {
153 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
154 					ctx_len, DMA_FROM_DEVICE);
155 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
156 		dev_err(jrdev, "unable to map ctx\n");
157 		return -ENOMEM;
158 	}
159 
160 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
161 
162 	return 0;
163 }
164 
165 /* Map req->result, and append seq_out_ptr command that points to it */
166 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
167 						u8 *result, int digestsize)
168 {
169 	dma_addr_t dst_dma;
170 
171 	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
172 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
173 
174 	return dst_dma;
175 }
176 
177 /* Map current buffer in state and put it in link table */
178 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
179 					    struct sec4_sg_entry *sec4_sg,
180 					    u8 *buf, int buflen)
181 {
182 	dma_addr_t buf_dma;
183 
184 	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
185 	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
186 
187 	return buf_dma;
188 }
189 
190 /*
191  * Only put the buffer in the link table if it contains data; in any case,
192  * unmap the previous buffer mapping first, since it may still be mapped.
193  */
194 static inline dma_addr_t
195 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
196 		       u8 *buf, dma_addr_t buf_dma, int buflen,
197 		       int last_buflen)
198 {
199 	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
200 		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
201 	if (buflen)
202 		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
203 	else
204 		buf_dma = 0;
205 
206 	return buf_dma;
207 }
208 
209 /* Map state->caam_ctx, and add it to link table */
210 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
211 				     struct caam_hash_state *state, int ctx_len,
212 				     struct sec4_sg_entry *sec4_sg, u32 flag)
213 {
214 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
215 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
216 		dev_err(jrdev, "unable to map ctx\n");
217 		return -ENOMEM;
218 	}
219 
220 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
221 
222 	return 0;
223 }
224 
225 /* Common shared descriptor commands */
226 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
227 {
228 	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
229 			  ctx->split_key_len, CLASS_2 |
230 			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
231 }
232 
233 /* Append key if it has been set */
234 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
235 {
236 	u32 *key_jump_cmd;
237 
238 	init_sh_desc(desc, HDR_SHARE_SERIAL);
239 
240 	if (ctx->split_key_len) {
241 		/* Skip if already shared */
242 		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
243 					   JUMP_COND_SHRD);
244 
245 		append_key_ahash(desc, ctx);
246 
247 		set_jump_tgt_here(desc, key_jump_cmd);
248 	}
249 
250 	/* Propagate errors from shared to job descriptor */
251 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
252 }
253 
254 /*
255  * For ahash, read data from seqin following state->caam_ctx, and write the
256  * resulting class2 context to seqout, which may be state->caam_ctx or
257  * req->result
258  */
259 static inline void ahash_append_load_str(u32 *desc, int digestsize)
260 {
261 	/* Calculate remaining bytes to read */
262 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
263 
264 	/* Read remaining bytes */
265 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
266 			     FIFOLD_TYPE_MSG | KEY_VLF);
267 
268 	/* Store class2 context bytes */
269 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
270 			 LDST_SRCDST_BYTE_CONTEXT);
271 }
272 
273 /*
274  * For ahash update, final and finup, import context, read and write to seqout
275  */
276 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
277 					 int digestsize,
278 					 struct caam_hash_ctx *ctx)
279 {
280 	init_sh_desc_key_ahash(desc, ctx);
281 
282 	/* Import context from software */
283 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
284 		   LDST_CLASS_2_CCB | ctx->ctx_len);
285 
286 	/* Class 2 operation */
287 	append_operation(desc, op | state | OP_ALG_ENCRYPT);
288 
289 	/*
290 	 * Load from buf and/or src and write to req->result or state->context
291 	 */
292 	ahash_append_load_str(desc, digestsize);
293 }
294 
295 /* For ahash first update and digest, read and write to seqout */
296 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
297 				     int digestsize, struct caam_hash_ctx *ctx)
298 {
299 	init_sh_desc_key_ahash(desc, ctx);
300 
301 	/* Class 2 operation */
302 	append_operation(desc, op | state | OP_ALG_ENCRYPT);
303 
304 	/*
305 	 * Load from buf and/or src and write to req->result or state->context
306 	 */
307 	ahash_append_load_str(desc, digestsize);
308 }
309 
310 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
311 {
312 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
313 	int digestsize = crypto_ahash_digestsize(ahash);
314 	struct device *jrdev = ctx->jrdev;
315 	u32 have_key = 0;
316 	u32 *desc;
317 
318 	if (ctx->split_key_len)
319 		have_key = OP_ALG_AAI_HMAC_PRECOMP;
320 
321 	/* ahash_update shared descriptor */
322 	desc = ctx->sh_desc_update;
323 
324 	init_sh_desc(desc, HDR_SHARE_SERIAL);
325 
326 	/* Import context from software */
327 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
328 		   LDST_CLASS_2_CCB | ctx->ctx_len);
329 
330 	/* Class 2 operation */
331 	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
332 			 OP_ALG_ENCRYPT);
333 
334 	/* Load data and write to result or context */
335 	ahash_append_load_str(desc, ctx->ctx_len);
336 
337 	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
338 						 DMA_TO_DEVICE);
339 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
340 		dev_err(jrdev, "unable to map shared descriptor\n");
341 		return -ENOMEM;
342 	}
343 #ifdef DEBUG
344 	print_hex_dump(KERN_ERR,
345 		       "ahash update shdesc@"__stringify(__LINE__)": ",
346 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
347 #endif
348 
349 	/* ahash_update_first shared descriptor */
350 	desc = ctx->sh_desc_update_first;
351 
352 	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
353 			  ctx->ctx_len, ctx);
354 
355 	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
356 						       desc_bytes(desc),
357 						       DMA_TO_DEVICE);
358 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
359 		dev_err(jrdev, "unable to map shared descriptor\n");
360 		return -ENOMEM;
361 	}
362 #ifdef DEBUG
363 	print_hex_dump(KERN_ERR,
364 		       "ahash update first shdesc@"__stringify(__LINE__)": ",
365 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
366 #endif
367 
368 	/* ahash_final shared descriptor */
369 	desc = ctx->sh_desc_fin;
370 
371 	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
372 			      OP_ALG_AS_FINALIZE, digestsize, ctx);
373 
374 	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
375 					      DMA_TO_DEVICE);
376 	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
377 		dev_err(jrdev, "unable to map shared descriptor\n");
378 		return -ENOMEM;
379 	}
380 #ifdef DEBUG
381 	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
382 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
383 		       desc_bytes(desc), 1);
384 #endif
385 
386 	/* ahash_finup shared descriptor */
387 	desc = ctx->sh_desc_finup;
388 
389 	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
390 			      OP_ALG_AS_FINALIZE, digestsize, ctx);
391 
392 	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
393 						DMA_TO_DEVICE);
394 	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
395 		dev_err(jrdev, "unable to map shared descriptor\n");
396 		return -ENOMEM;
397 	}
398 #ifdef DEBUG
399 	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
400 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
401 		       desc_bytes(desc), 1);
402 #endif
403 
404 	/* ahash_digest shared descriptor */
405 	desc = ctx->sh_desc_digest;
406 
407 	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
408 			  digestsize, ctx);
409 
410 	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
411 						 desc_bytes(desc),
412 						 DMA_TO_DEVICE);
413 	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
414 		dev_err(jrdev, "unable to map shared descriptor\n");
415 		return -ENOMEM;
416 	}
417 #ifdef DEBUG
418 	print_hex_dump(KERN_ERR,
419 		       "ahash digest shdesc@"__stringify(__LINE__)": ",
420 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
421 		       desc_bytes(desc), 1);
422 #endif
423 
424 	return 0;
425 }
426 
427 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
428 			      u32 keylen)
429 {
430 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
431 			       ctx->split_key_pad_len, key_in, keylen,
432 			       ctx->alg_op);
433 }
434 
435 /* Digest the key if it is longer than the algorithm's block size */
436 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
437 			   u32 *keylen, u8 *key_out, u32 digestsize)
438 {
439 	struct device *jrdev = ctx->jrdev;
440 	u32 *desc;
441 	struct split_key_result result;
442 	dma_addr_t src_dma, dst_dma;
443 	int ret = 0;
444 
445 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
446 	if (!desc) {
447 		dev_err(jrdev, "unable to allocate key input memory\n");
448 		return -ENOMEM;
449 	}
450 
451 	init_job_desc(desc, 0);
452 
453 	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
454 				 DMA_TO_DEVICE);
455 	if (dma_mapping_error(jrdev, src_dma)) {
456 		dev_err(jrdev, "unable to map key input memory\n");
457 		kfree(desc);
458 		return -ENOMEM;
459 	}
460 	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
461 				 DMA_FROM_DEVICE);
462 	if (dma_mapping_error(jrdev, dst_dma)) {
463 		dev_err(jrdev, "unable to map key output memory\n");
464 		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
465 		kfree(desc);
466 		return -ENOMEM;
467 	}
468 
469 	/* Job descriptor to perform unkeyed hash on key_in */
470 	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
471 			 OP_ALG_AS_INITFINAL);
472 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
473 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
474 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
475 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
476 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
477 			 LDST_SRCDST_BYTE_CONTEXT);
478 
479 #ifdef DEBUG
480 	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
481 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
482 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
483 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
484 #endif
485 
486 	result.err = 0;
487 	init_completion(&result.completion);
488 
489 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
490 	if (!ret) {
491 		/* in progress */
492 		wait_for_completion_interruptible(&result.completion);
493 		ret = result.err;
494 #ifdef DEBUG
495 		print_hex_dump(KERN_ERR,
496 			       "digested key@"__stringify(__LINE__)": ",
497 			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
498 			       digestsize, 1);
499 #endif
500 	}
501 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
502 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
503 
504 	*keylen = digestsize;
505 
506 	kfree(desc);
507 
508 	return ret;
509 }
510 
511 static int ahash_setkey(struct crypto_ahash *ahash,
512 			const u8 *key, unsigned int keylen)
513 {
514 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
515 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
516 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
517 	struct device *jrdev = ctx->jrdev;
518 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
519 	int digestsize = crypto_ahash_digestsize(ahash);
520 	int ret = 0;
521 	u8 *hashed_key = NULL;
522 
523 #ifdef DEBUG
524 	printk(KERN_ERR "keylen %d\n", keylen);
525 #endif
526 
527 	if (keylen > blocksize) {
528 		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
529 				     GFP_DMA);
530 		if (!hashed_key)
531 			return -ENOMEM;
532 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
533 				      digestsize);
534 		if (ret)
535 			goto badkey;
536 		key = hashed_key;
537 	}
538 
539 	/* Pick class 2 key length from algorithm submask */
540 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
541 				      OP_ALG_ALGSEL_SHIFT] * 2;
542 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
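
	/*
	 * Worked example, for illustration: hmac(sha256) selects
	 * mdpadlen[3] = 32, so split_key_len = 64 (a 32-byte ipad state
	 * plus a 32-byte opad state) and split_key_pad_len =
	 * ALIGN(64, 16) = 64.
	 */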
543 
544 #ifdef DEBUG
545 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
546 	       ctx->split_key_len, ctx->split_key_pad_len);
547 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
548 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
549 #endif
550 
551 	ret = gen_split_hash_key(ctx, key, keylen);
552 	if (ret)
553 		goto badkey;
554 
555 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
556 				      DMA_TO_DEVICE);
557 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
558 		dev_err(jrdev, "unable to map key i/o memory\n");
559 		ret = -ENOMEM;
560 		goto map_err;
561 	}
562 #ifdef DEBUG
563 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
564 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
565 		       ctx->split_key_pad_len, 1);
566 #endif
567 
568 	ret = ahash_set_sh_desc(ahash);
569 	if (ret) {
570 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
571 				 DMA_TO_DEVICE);
572 	}
573 
574 map_err:
575 	kfree(hashed_key);
576 	return ret;
577 badkey:
578 	kfree(hashed_key);
579 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
580 	return -EINVAL;
581 }
582 
583 /*
584  * ahash_edesc - s/w-extended ahash descriptor
585  * @dst_dma: physical mapped address of req->result
586  * @sec4_sg_dma: physical mapped address of h/w link table
587  * @src_nents: number of segments in input scatterlist
588  * @sec4_sg_bytes: length of dma mapped sec4_sg space
589  * @hw_desc: the h/w job descriptor followed by any referenced link tables
590  * @sec4_sg: h/w link table
591  */
592 struct ahash_edesc {
593 	dma_addr_t dst_dma;
594 	dma_addr_t sec4_sg_dma;
595 	int src_nents;
596 	int sec4_sg_bytes;
597 	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
598 	struct sec4_sg_entry sec4_sg[0];
599 };
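
/*
 * Illustrative allocation of an edesc with trailing room for its link
 * table, matching the pattern the functions below use:
 *
 *	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
 */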
600 
601 static inline void ahash_unmap(struct device *dev,
602 			struct ahash_edesc *edesc,
603 			struct ahash_request *req, int dst_len)
604 {
605 	if (edesc->src_nents)
606 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
607 	if (edesc->dst_dma)
608 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
609 
610 	if (edesc->sec4_sg_bytes)
611 		dma_unmap_single(dev, edesc->sec4_sg_dma,
612 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
613 }
614 
615 static inline void ahash_unmap_ctx(struct device *dev,
616 			struct ahash_edesc *edesc,
617 			struct ahash_request *req, int dst_len, u32 flag)
618 {
619 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
620 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
621 	struct caam_hash_state *state = ahash_request_ctx(req);
622 
623 	if (state->ctx_dma)
624 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
625 	ahash_unmap(dev, edesc, req, dst_len);
626 }
627 
628 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
629 		       void *context)
630 {
631 	struct ahash_request *req = context;
632 	struct ahash_edesc *edesc;
633 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
634 	int digestsize = crypto_ahash_digestsize(ahash);
635 #ifdef DEBUG
636 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
637 	struct caam_hash_state *state = ahash_request_ctx(req);
638 
639 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
640 #endif
641 
642 	edesc = (struct ahash_edesc *)((char *)desc -
643 		 offsetof(struct ahash_edesc, hw_desc));
644 	if (err)
645 		caam_jr_strstatus(jrdev, err);
646 
647 	ahash_unmap(jrdev, edesc, req, digestsize);
648 	kfree(edesc);
649 
650 #ifdef DEBUG
651 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
652 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
653 		       ctx->ctx_len, 1);
654 	if (req->result)
655 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
656 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
657 			       digestsize, 1);
658 #endif
659 
660 	req->base.complete(&req->base, err);
661 }
662 
663 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
664 			    void *context)
665 {
666 	struct ahash_request *req = context;
667 	struct ahash_edesc *edesc;
668 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
669 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
670 #ifdef DEBUG
671 	struct caam_hash_state *state = ahash_request_ctx(req);
672 	int digestsize = crypto_ahash_digestsize(ahash);
673 
674 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
675 #endif
676 
677 	edesc = (struct ahash_edesc *)((char *)desc -
678 		 offsetof(struct ahash_edesc, hw_desc));
679 	if (err)
680 		caam_jr_strstatus(jrdev, err);
681 
682 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
683 	kfree(edesc);
684 
685 #ifdef DEBUG
686 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
687 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
688 		       ctx->ctx_len, 1);
689 	if (req->result)
690 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
691 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
692 			       digestsize, 1);
693 #endif
694 
695 	req->base.complete(&req->base, err);
696 }
697 
698 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
699 			       void *context)
700 {
701 	struct ahash_request *req = context;
702 	struct ahash_edesc *edesc;
703 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
704 	int digestsize = crypto_ahash_digestsize(ahash);
705 #ifdef DEBUG
706 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
707 	struct caam_hash_state *state = ahash_request_ctx(req);
708 
709 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
710 #endif
711 
712 	edesc = (struct ahash_edesc *)((char *)desc -
713 		 offsetof(struct ahash_edesc, hw_desc));
714 	if (err)
715 		caam_jr_strstatus(jrdev, err);
716 
717 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
718 	kfree(edesc);
719 
720 #ifdef DEBUG
721 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
722 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
723 		       ctx->ctx_len, 1);
724 	if (req->result)
725 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
726 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
727 			       digestsize, 1);
728 #endif
729 
730 	req->base.complete(&req->base, err);
731 }
732 
733 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
734 			       void *context)
735 {
736 	struct ahash_request *req = context;
737 	struct ahash_edesc *edesc;
738 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
739 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
740 #ifdef DEBUG
741 	struct caam_hash_state *state = ahash_request_ctx(req);
742 	int digestsize = crypto_ahash_digestsize(ahash);
743 
744 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
745 #endif
746 
747 	edesc = (struct ahash_edesc *)((char *)desc -
748 		 offsetof(struct ahash_edesc, hw_desc));
749 	if (err)
750 		caam_jr_strstatus(jrdev, err);
751 
752 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
753 	kfree(edesc);
754 
755 #ifdef DEBUG
756 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
757 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
758 		       ctx->ctx_len, 1);
759 	if (req->result)
760 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
761 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
762 			       digestsize, 1);
763 #endif
764 
765 	req->base.complete(&req->base, err);
766 }
767 
768 /* submit update job descriptor */
769 static int ahash_update_ctx(struct ahash_request *req)
770 {
771 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
772 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
773 	struct caam_hash_state *state = ahash_request_ctx(req);
774 	struct device *jrdev = ctx->jrdev;
775 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
776 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
777 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
778 	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
779 	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
780 	int *next_buflen = state->current_buf ? &state->buflen_0 :
781 			   &state->buflen_1, last_buflen;
782 	int in_len = *buflen + req->nbytes, to_hash;
783 	u32 *sh_desc = ctx->sh_desc_update, *desc;
784 	dma_addr_t ptr = ctx->sh_desc_update_dma;
785 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
786 	struct ahash_edesc *edesc;
787 	int ret = 0;
788 	int sh_len;
789 
790 	last_buflen = *next_buflen;
791 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
792 	to_hash = in_len - *next_buflen;
793 
794 	if (to_hash) {
795 		src_nents = sg_nents_for_len(req->src,
796 					     req->nbytes - (*next_buflen));
797 		if (src_nents < 0) {
798 			dev_err(jrdev, "Invalid number of src SG.\n");
799 			return src_nents;
800 		}
801 
802 		if (src_nents) {
803 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
804 						  DMA_TO_DEVICE);
805 			if (!mapped_nents) {
806 				dev_err(jrdev, "unable to DMA map source\n");
807 				return -ENOMEM;
808 			}
809 		} else {
810 			mapped_nents = 0;
811 		}
812 
813 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
814 		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
815 				 sizeof(struct sec4_sg_entry);
816 
817 		/*
818 		 * allocate space for base edesc and hw desc commands,
819 		 * link tables
820 		 */
821 		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
822 				GFP_DMA | flags);
823 		if (!edesc) {
824 			dev_err(jrdev,
825 				"could not allocate extended descriptor\n");
826 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
827 			return -ENOMEM;
828 		}
829 
830 		edesc->src_nents = src_nents;
831 		edesc->sec4_sg_bytes = sec4_sg_bytes;
832 
833 		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
834 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
835 		if (ret)
836 			goto err;
837 
838 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
839 							edesc->sec4_sg + 1,
840 							buf, state->buf_dma,
841 							*buflen, last_buflen);
842 
843 		if (mapped_nents) {
844 			sg_to_sec4_sg_last(req->src, mapped_nents,
845 					   edesc->sec4_sg + sec4_sg_src_index,
846 					   0);
847 			if (*next_buflen)
848 				scatterwalk_map_and_copy(next_buf, req->src,
849 							 to_hash - *buflen,
850 							 *next_buflen, 0);
851 		} else {
852 			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
853 				cpu_to_caam32(SEC4_SG_LEN_FIN);
854 		}
855 
856 		state->current_buf = !state->current_buf;
857 
858 		sh_len = desc_len(sh_desc);
859 		desc = edesc->hw_desc;
860 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
861 				     HDR_REVERSE);
862 
863 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
864 						     sec4_sg_bytes,
865 						     DMA_TO_DEVICE);
866 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
867 			dev_err(jrdev, "unable to map S/G table\n");
868 			ret = -ENOMEM;
869 			goto err;
870 		}
871 
872 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
873 				       to_hash, LDST_SGF);
874 
875 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
876 
877 #ifdef DEBUG
878 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
879 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
880 			       desc_bytes(desc), 1);
881 #endif
882 
883 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
884 		if (ret)
885 			goto err;
886 
887 		ret = -EINPROGRESS;
888 	} else if (*next_buflen) {
889 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
890 					 req->nbytes, 0);
891 		*buflen = *next_buflen;
892 		*next_buflen = last_buflen;
893 	}
894 #ifdef DEBUG
895 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
896 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
897 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
898 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
899 		       *next_buflen, 1);
900 #endif
901 
902 	return ret;
903 
904  err:
905 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
906 	kfree(edesc);
907 	return ret;
908 }
909 
910 static int ahash_final_ctx(struct ahash_request *req)
911 {
912 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
913 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
914 	struct caam_hash_state *state = ahash_request_ctx(req);
915 	struct device *jrdev = ctx->jrdev;
916 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
917 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
918 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
919 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
920 	int last_buflen = state->current_buf ? state->buflen_0 :
921 			  state->buflen_1;
922 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
923 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
924 	int sec4_sg_bytes, sec4_sg_src_index;
925 	int digestsize = crypto_ahash_digestsize(ahash);
926 	struct ahash_edesc *edesc;
927 	int ret = 0;
928 	int sh_len;
929 
930 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
931 	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
932 
933 	/* allocate space for base edesc and hw desc commands, link tables */
934 	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
935 	if (!edesc) {
936 		dev_err(jrdev, "could not allocate extended descriptor\n");
937 		return -ENOMEM;
938 	}
939 
940 	sh_len = desc_len(sh_desc);
941 	desc = edesc->hw_desc;
942 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
943 
944 	edesc->sec4_sg_bytes = sec4_sg_bytes;
945 	edesc->src_nents = 0;
946 
947 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
948 				 edesc->sec4_sg, DMA_TO_DEVICE);
949 	if (ret)
950 		goto err;
951 
952 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
953 						buf, state->buf_dma, buflen,
954 						last_buflen);
955 	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
956 		cpu_to_caam32(SEC4_SG_LEN_FIN);
957 
958 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
959 					    sec4_sg_bytes, DMA_TO_DEVICE);
960 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
961 		dev_err(jrdev, "unable to map S/G table\n");
962 		ret = -ENOMEM;
963 		goto err;
964 	}
965 
966 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
967 			  LDST_SGF);
968 
969 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
970 						digestsize);
971 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
972 		dev_err(jrdev, "unable to map dst\n");
973 		ret = -ENOMEM;
974 		goto err;
975 	}
976 
977 #ifdef DEBUG
978 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
979 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
980 #endif
981 
982 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
983 	if (ret)
984 		goto err;
985 
986 	return -EINPROGRESS;
987 
988 err:
989 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
990 	kfree(edesc);
991 	return ret;
992 }
993 
994 static int ahash_finup_ctx(struct ahash_request *req)
995 {
996 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
997 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
998 	struct caam_hash_state *state = ahash_request_ctx(req);
999 	struct device *jrdev = ctx->jrdev;
1000 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1001 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1002 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1003 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1004 	int last_buflen = state->current_buf ? state->buflen_0 :
1005 			  state->buflen_1;
1006 	u32 *sh_desc = ctx->sh_desc_finup, *desc;
1007 	dma_addr_t ptr = ctx->sh_desc_finup_dma;
1008 	int sec4_sg_bytes, sec4_sg_src_index;
1009 	int src_nents, mapped_nents;
1010 	int digestsize = crypto_ahash_digestsize(ahash);
1011 	struct ahash_edesc *edesc;
1012 	int ret = 0;
1013 	int sh_len;
1014 
1015 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1016 	if (src_nents < 0) {
1017 		dev_err(jrdev, "Invalid number of src SG.\n");
1018 		return src_nents;
1019 	}
1020 
1021 	if (src_nents) {
1022 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1023 					  DMA_TO_DEVICE);
1024 		if (!mapped_nents) {
1025 			dev_err(jrdev, "unable to DMA map source\n");
1026 			return -ENOMEM;
1027 		}
1028 	} else {
1029 		mapped_nents = 0;
1030 	}
1031 
1032 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1033 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1034 			 sizeof(struct sec4_sg_entry);
1035 
1036 	/* allocate space for base edesc and hw desc commands, link tables */
1037 	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
1038 	if (!edesc) {
1039 		dev_err(jrdev, "could not allocate extended descriptor\n");
1040 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1041 		return -ENOMEM;
1042 	}
1043 
1044 	sh_len = desc_len(sh_desc);
1045 	desc = edesc->hw_desc;
1046 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1047 
1048 	edesc->src_nents = src_nents;
1049 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1050 
1051 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
1052 				 edesc->sec4_sg, DMA_TO_DEVICE);
1053 	if (ret)
1054 		goto err;
1055 
1056 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1057 						buf, state->buf_dma, buflen,
1058 						last_buflen);
1059 
1060 	sg_to_sec4_sg_last(req->src, mapped_nents,
1061 			   edesc->sec4_sg + sec4_sg_src_index, 0);
1062 
1063 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1064 					    sec4_sg_bytes, DMA_TO_DEVICE);
1065 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1066 		dev_err(jrdev, "unable to map S/G table\n");
1067 		ret = -ENOMEM;
1068 		goto err;
1069 	}
1070 
1071 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1072 			       buflen + req->nbytes, LDST_SGF);
1073 
1074 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1075 						digestsize);
1076 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1077 		dev_err(jrdev, "unable to map dst\n");
1078 		ret = -ENOMEM;
1079 		goto err;
1080 	}
1081 
1082 #ifdef DEBUG
1083 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1084 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1085 #endif
1086 
1087 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1088 	if (ret)
1089 		goto err;
1090 
1091 	return -EINPROGRESS;
1092 
1093 err:
1094 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1095 	kfree(edesc);
1096 	return ret;
1097 }
1098 
1099 static int ahash_digest(struct ahash_request *req)
1100 {
1101 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1102 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1103 	struct device *jrdev = ctx->jrdev;
1104 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1105 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1106 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1107 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1108 	int digestsize = crypto_ahash_digestsize(ahash);
1109 	int src_nents, mapped_nents, sec4_sg_bytes;
1110 	dma_addr_t src_dma;
1111 	struct ahash_edesc *edesc;
1112 	int ret = 0;
1113 	u32 options;
1114 	int sh_len;
1115 
1116 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1117 	if (src_nents < 0) {
1118 		dev_err(jrdev, "Invalid number of src SG.\n");
1119 		return src_nents;
1120 	}
1121 
1122 	if (src_nents) {
1123 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1124 					  DMA_TO_DEVICE);
1125 		if (!mapped_nents) {
1126 			dev_err(jrdev, "unable to map source for DMA\n");
1127 			return -ENOMEM;
1128 		}
1129 	} else {
1130 		mapped_nents = 0;
1131 	}
1132 
1133 	if (mapped_nents > 1)
1134 		sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry);
1135 	else
1136 		sec4_sg_bytes = 0;
1137 
1138 	/* allocate space for base edesc and hw desc commands, link tables */
1139 	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
1140 	if (!edesc) {
1141 		dev_err(jrdev, "could not allocate extended descriptor\n");
1142 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1143 		return -ENOMEM;
1144 	}
1145 
1146 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1147 	edesc->src_nents = src_nents;
1148 
1149 	sh_len = desc_len(sh_desc);
1150 	desc = edesc->hw_desc;
1151 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1152 
1153 	if (src_nents > 1) {
1154 		sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0);
1155 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1156 					    sec4_sg_bytes, DMA_TO_DEVICE);
1157 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1158 			dev_err(jrdev, "unable to map S/G table\n");
1159 			ahash_unmap(jrdev, edesc, req, digestsize);
1160 			kfree(edesc);
1161 			return -ENOMEM;
1162 		}
1163 		src_dma = edesc->sec4_sg_dma;
1164 		options = LDST_SGF;
1165 	} else {
1166 		src_dma = sg_dma_address(req->src);
1167 		options = 0;
1168 	}
1169 	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1170 
1171 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1172 						digestsize);
1173 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1174 		dev_err(jrdev, "unable to map dst\n");
1175 		ahash_unmap(jrdev, edesc, req, digestsize);
1176 		kfree(edesc);
1177 		return -ENOMEM;
1178 	}
1179 
1180 #ifdef DEBUG
1181 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1182 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1183 #endif
1184 
1185 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1186 	if (!ret) {
1187 		ret = -EINPROGRESS;
1188 	} else {
1189 		ahash_unmap(jrdev, edesc, req, digestsize);
1190 		kfree(edesc);
1191 	}
1192 
1193 	return ret;
1194 }
1195 
1196 /* submit ahash final if it is the first job descriptor */
1197 static int ahash_final_no_ctx(struct ahash_request *req)
1198 {
1199 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1200 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1201 	struct caam_hash_state *state = ahash_request_ctx(req);
1202 	struct device *jrdev = ctx->jrdev;
1203 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1204 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1205 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1206 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1207 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1208 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1209 	int digestsize = crypto_ahash_digestsize(ahash);
1210 	struct ahash_edesc *edesc;
1211 	int ret = 0;
1212 	int sh_len;
1213 
1214 	/* allocate space for base edesc and hw desc commands, link tables */
1215 	edesc = kzalloc(sizeof(*edesc), GFP_DMA | flags);
1216 	if (!edesc) {
1217 		dev_err(jrdev, "could not allocate extended descriptor\n");
1218 		return -ENOMEM;
1219 	}
1220 
1221 	edesc->sec4_sg_bytes = 0;
1222 	sh_len = desc_len(sh_desc);
1223 	desc = edesc->hw_desc;
1224 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1225 
1226 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1227 	if (dma_mapping_error(jrdev, state->buf_dma)) {
1228 		dev_err(jrdev, "unable to map src\n");
1229 		ahash_unmap(jrdev, edesc, req, digestsize);
1230 		kfree(edesc);
1231 		return -ENOMEM;
1232 	}
1233 
1234 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1235 
1236 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1237 						digestsize);
1238 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1239 		dev_err(jrdev, "unable to map dst\n");
1240 		ahash_unmap(jrdev, edesc, req, digestsize);
1241 		kfree(edesc);
1242 		return -ENOMEM;
1243 	}
1244 	edesc->src_nents = 0;
1245 
1246 #ifdef DEBUG
1247 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1248 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1249 #endif
1250 
1251 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1252 	if (!ret) {
1253 		ret = -EINPROGRESS;
1254 	} else {
1255 		ahash_unmap(jrdev, edesc, req, digestsize);
1256 		kfree(edesc);
1257 	}
1258 
1259 	return ret;
1260 }
1261 
1262 /* submit ahash update if it is the first job descriptor after update */
1263 static int ahash_update_no_ctx(struct ahash_request *req)
1264 {
1265 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1266 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1267 	struct caam_hash_state *state = ahash_request_ctx(req);
1268 	struct device *jrdev = ctx->jrdev;
1269 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1270 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1271 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1272 	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1273 	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1274 	int *next_buflen = state->current_buf ? &state->buflen_0 :
1275 			   &state->buflen_1;
1276 	int in_len = *buflen + req->nbytes, to_hash;
1277 	int sec4_sg_bytes, src_nents, mapped_nents;
1278 	struct ahash_edesc *edesc;
1279 	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1280 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1281 	int ret = 0;
1282 	int sh_len;
1283 
1284 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1285 	to_hash = in_len - *next_buflen;
1286 
1287 	if (to_hash) {
1288 		src_nents = sg_nents_for_len(req->src,
1289 					     req->nbytes - *next_buflen);
1290 		if (src_nents < 0) {
1291 			dev_err(jrdev, "Invalid number of src SG.\n");
1292 			return src_nents;
1293 		}
1294 
1295 		if (src_nents) {
1296 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1297 						  DMA_TO_DEVICE);
1298 			if (!mapped_nents) {
1299 				dev_err(jrdev, "unable to DMA map source\n");
1300 				return -ENOMEM;
1301 			}
1302 		} else {
1303 			mapped_nents = 0;
1304 		}
1305 
1306 		sec4_sg_bytes = (1 + mapped_nents) *
1307 				sizeof(struct sec4_sg_entry);
1308 
1309 		/*
1310 		 * allocate space for base edesc and hw desc commands,
1311 		 * link tables
1312 		 */
1313 		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
1314 				GFP_DMA | flags);
1315 		if (!edesc) {
1316 			dev_err(jrdev,
1317 				"could not allocate extended descriptor\n");
1318 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1319 			return -ENOMEM;
1320 		}
1321 
1322 		edesc->src_nents = src_nents;
1323 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1324 		edesc->dst_dma = 0;
1325 
1326 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1327 						    buf, *buflen);
1328 		sg_to_sec4_sg_last(req->src, mapped_nents,
1329 				   edesc->sec4_sg + 1, 0);
1330 
1331 		if (*next_buflen) {
1332 			scatterwalk_map_and_copy(next_buf, req->src,
1333 						 to_hash - *buflen,
1334 						 *next_buflen, 0);
1335 		}
1336 
1337 		state->current_buf = !state->current_buf;
1338 
1339 		sh_len = desc_len(sh_desc);
1340 		desc = edesc->hw_desc;
1341 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1342 				     HDR_REVERSE);
1343 
1344 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1345 						    sec4_sg_bytes,
1346 						    DMA_TO_DEVICE);
1347 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1348 			dev_err(jrdev, "unable to map S/G table\n");
1349 			ret = -ENOMEM;
1350 			goto err;
1351 		}
1352 
1353 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1354 
1355 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1356 		if (ret)
1357 			goto err;
1358 
1359 #ifdef DEBUG
1360 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1361 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1362 			       desc_bytes(desc), 1);
1363 #endif
1364 
1365 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1366 		if (ret)
1367 			goto err;
1368 
1369 		ret = -EINPROGRESS;
1370 		state->update = ahash_update_ctx;
1371 		state->finup = ahash_finup_ctx;
1372 		state->final = ahash_final_ctx;
1373 	} else if (*next_buflen) {
1374 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1375 					 req->nbytes, 0);
1376 		*buflen = *next_buflen;
1377 		*next_buflen = 0;
1378 	}
1379 #ifdef DEBUG
1380 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1381 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1382 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1383 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1384 		       *next_buflen, 1);
1385 #endif
1386 
1387 	return ret;
1388 
1389 err:
1390 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1391 	kfree(edesc);
1392 	return ret;
1393 }
1394 
1395 /* submit ahash finup if it is the first job descriptor after update */
1396 static int ahash_finup_no_ctx(struct ahash_request *req)
1397 {
1398 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1399 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1400 	struct caam_hash_state *state = ahash_request_ctx(req);
1401 	struct device *jrdev = ctx->jrdev;
1402 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1403 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1404 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1405 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1406 	int last_buflen = state->current_buf ? state->buflen_0 :
1407 			  state->buflen_1;
1408 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1409 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1410 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1411 	int digestsize = crypto_ahash_digestsize(ahash);
1412 	struct ahash_edesc *edesc;
1413 	int sh_len;
1414 	int ret = 0;
1415 
1416 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1417 	if (src_nents < 0) {
1418 		dev_err(jrdev, "Invalid number of src SG.\n");
1419 		return src_nents;
1420 	}
1421 
1422 	if (src_nents) {
1423 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1424 					  DMA_TO_DEVICE);
1425 		if (!mapped_nents) {
1426 			dev_err(jrdev, "unable to DMA map source\n");
1427 			return -ENOMEM;
1428 		}
1429 	} else {
1430 		mapped_nents = 0;
1431 	}
1432 
1433 	sec4_sg_src_index = 2;
1434 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1435 			 sizeof(struct sec4_sg_entry);
1436 
1437 	/* allocate space for base edesc and hw desc commands, link tables */
1438 	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
1439 	if (!edesc) {
1440 		dev_err(jrdev, "could not allocate extended descriptor\n");
1441 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1442 		return -ENOMEM;
1443 	}
1444 
1445 	sh_len = desc_len(sh_desc);
1446 	desc = edesc->hw_desc;
1447 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1448 
1449 	edesc->src_nents = src_nents;
1450 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1451 
1452 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1453 						state->buf_dma, buflen,
1454 						last_buflen);
1455 
1456 	sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0);
1457 
1458 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1459 					    sec4_sg_bytes, DMA_TO_DEVICE);
1460 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1461 		dev_err(jrdev, "unable to map S/G table\n");
1462 		ahash_unmap(jrdev, edesc, req, digestsize);
1463 		kfree(edesc);
1464 		return -ENOMEM;
1465 	}
1466 
1467 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1468 			       req->nbytes, LDST_SGF);
1469 
1470 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1471 						digestsize);
1472 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1473 		dev_err(jrdev, "unable to map dst\n");
1474 		ahash_unmap(jrdev, edesc, req, digestsize);
1475 		kfree(edesc);
1476 		return -ENOMEM;
1477 	}
1478 
1479 #ifdef DEBUG
1480 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1481 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1482 #endif
1483 
1484 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1485 	if (!ret) {
1486 		ret = -EINPROGRESS;
1487 	} else {
1488 		ahash_unmap(jrdev, edesc, req, digestsize);
1489 		kfree(edesc);
1490 	}
1491 
1492 	return ret;
1493 }
1494 
1495 /* submit first update job descriptor after init */
1496 static int ahash_update_first(struct ahash_request *req)
1497 {
1498 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1499 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1500 	struct caam_hash_state *state = ahash_request_ctx(req);
1501 	struct device *jrdev = ctx->jrdev;
1502 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1503 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1504 	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
1505 	int *next_buflen = state->current_buf ?
1506 		&state->buflen_1 : &state->buflen_0;
1507 	int to_hash;
1508 	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1509 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1510 	int sec4_sg_bytes, src_nents, mapped_nents;
1511 	dma_addr_t src_dma;
1512 	u32 options;
1513 	struct ahash_edesc *edesc;
1514 	int ret = 0;
1515 	int sh_len;
1516 
1517 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1518 				      1);
1519 	to_hash = req->nbytes - *next_buflen;
1520 
1521 	if (to_hash) {
1522 		src_nents = sg_nents_for_len(req->src,
1523 					     req->nbytes - *next_buflen);
1524 		if (src_nents < 0) {
1525 			dev_err(jrdev, "Invalid number of src SG.\n");
1526 			return src_nents;
1527 		}
1528 
1529 		if (src_nents) {
1530 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1531 						  DMA_TO_DEVICE);
1532 			if (!mapped_nents) {
1533 				dev_err(jrdev, "unable to map source for DMA\n");
1534 				return -ENOMEM;
1535 			}
1536 		} else {
1537 			mapped_nents = 0;
1538 		}
1539 		if (mapped_nents > 1)
1540 			sec4_sg_bytes = mapped_nents *
1541 					sizeof(struct sec4_sg_entry);
1542 		else
1543 			sec4_sg_bytes = 0;
1544 
1545 		/*
1546 		 * allocate space for base edesc and hw desc commands,
1547 		 * link tables
1548 		 */
1549 		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
1550 				GFP_DMA | flags);
1551 		if (!edesc) {
1552 			dev_err(jrdev,
1553 				"could not allocate extended descriptor\n");
1554 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1555 			return -ENOMEM;
1556 		}
1557 
1558 		edesc->src_nents = src_nents;
1559 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1560 		edesc->dst_dma = 0;
1561 
1562 		if (src_nents > 1) {
1563 			sg_to_sec4_sg_last(req->src, mapped_nents,
1564 					   edesc->sec4_sg, 0);
1565 			edesc->sec4_sg_dma = dma_map_single(jrdev,
1566 							    edesc->sec4_sg,
1567 							    sec4_sg_bytes,
1568 							    DMA_TO_DEVICE);
1569 			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1570 				dev_err(jrdev, "unable to map S/G table\n");
1571 				ret = -ENOMEM;
1572 				goto err;
1573 			}
1574 			src_dma = edesc->sec4_sg_dma;
1575 			options = LDST_SGF;
1576 		} else {
1577 			src_dma = sg_dma_address(req->src);
1578 			options = 0;
1579 		}
1580 
1581 		if (*next_buflen)
1582 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1583 						 *next_buflen, 0);
1584 
1585 		sh_len = desc_len(sh_desc);
1586 		desc = edesc->hw_desc;
1587 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1588 				     HDR_REVERSE);
1589 
1590 		append_seq_in_ptr(desc, src_dma, to_hash, options);
1591 
1592 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1593 		if (ret)
1594 			goto err;
1595 
1596 #ifdef DEBUG
1597 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1598 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1599 			       desc_bytes(desc), 1);
1600 #endif
1601 
1602 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1603 		if (ret)
1604 			goto err;
1605 
1606 		ret = -EINPROGRESS;
1607 		state->update = ahash_update_ctx;
1608 		state->finup = ahash_finup_ctx;
1609 		state->final = ahash_final_ctx;
1610 	} else if (*next_buflen) {
1611 		state->update = ahash_update_no_ctx;
1612 		state->finup = ahash_finup_no_ctx;
1613 		state->final = ahash_final_no_ctx;
1614 		scatterwalk_map_and_copy(next_buf, req->src, 0,
1615 					 req->nbytes, 0);
1616 	}
1617 #ifdef DEBUG
1618 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1619 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1620 		       *next_buflen, 1);
1621 #endif
1622 
1623 	return ret;
1624 
1625 err:
1626 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1627 	kfree(edesc);
1628 	return ret;
1629 }
1630 
1631 static int ahash_finup_first(struct ahash_request *req)
1632 {
1633 	return ahash_digest(req);
1634 }
1635 
1636 static int ahash_init(struct ahash_request *req)
1637 {
1638 	struct caam_hash_state *state = ahash_request_ctx(req);
1639 
1640 	state->update = ahash_update_first;
1641 	state->finup = ahash_finup_first;
1642 	state->final = ahash_final_no_ctx;
1643 
1644 	state->current_buf = 0;
1645 	state->buf_dma = 0;
1646 	state->buflen_0 = 0;
1647 	state->buflen_1 = 0;
1648 
1649 	return 0;
1650 }
1651 
1652 static int ahash_update(struct ahash_request *req)
1653 {
1654 	struct caam_hash_state *state = ahash_request_ctx(req);
1655 
1656 	return state->update(req);
1657 }
1658 
1659 static int ahash_finup(struct ahash_request *req)
1660 {
1661 	struct caam_hash_state *state = ahash_request_ctx(req);
1662 
1663 	return state->finup(req);
1664 }
1665 
1666 static int ahash_final(struct ahash_request *req)
1667 {
1668 	struct caam_hash_state *state = ahash_request_ctx(req);
1669 
1670 	return state->final(req);
1671 }
1672 
1673 static int ahash_export(struct ahash_request *req, void *out)
1674 {
1675 	struct caam_hash_state *state = ahash_request_ctx(req);
1676 	struct caam_export_state *export = out;
1677 	int len;
1678 	u8 *buf;
1679 
1680 	if (state->current_buf) {
1681 		buf = state->buf_1;
1682 		len = state->buflen_1;
1683 	} else {
1684 		buf = state->buf_0;
1685 		len = state->buflen_0;
1686 	}
1687 
1688 	memcpy(export->buf, buf, len);
1689 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1690 	export->buflen = len;
1691 	export->update = state->update;
1692 	export->final = state->final;
1693 	export->finup = state->finup;
1694 
1695 	return 0;
1696 }
1697 
1698 static int ahash_import(struct ahash_request *req, const void *in)
1699 {
1700 	struct caam_hash_state *state = ahash_request_ctx(req);
1701 	const struct caam_export_state *export = in;
1702 
1703 	memset(state, 0, sizeof(*state));
1704 	memcpy(state->buf_0, export->buf, export->buflen);
1705 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1706 	state->buflen_0 = export->buflen;
1707 	state->update = export->update;
1708 	state->final = export->final;
1709 	state->finup = export->finup;
1710 
1711 	return 0;
1712 }
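/*
 * Import deliberately lands the exported buffer in buf_0: the memset()
 * above resets current_buf to 0, so the restored bytes are again the
 * "current" buffer regardless of which one was live at export time.
 */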
1713 
1714 struct caam_hash_template {
1715 	char name[CRYPTO_MAX_ALG_NAME];
1716 	char driver_name[CRYPTO_MAX_ALG_NAME];
1717 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1718 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1719 	unsigned int blocksize;
1720 	struct ahash_alg template_ahash;
1721 	u32 alg_type;
1722 	u32 alg_op;
1723 };
1724 
1725 /*
 * ahash templates: each entry below is instantiated twice by
 * caam_hash_alloc(), once keyed under hmac_name and once unkeyed under
 * name (with setkey stripped).
 */
1726 static struct caam_hash_template driver_hash[] = {
1727 	{
1728 		.name = "sha1",
1729 		.driver_name = "sha1-caam",
1730 		.hmac_name = "hmac(sha1)",
1731 		.hmac_driver_name = "hmac-sha1-caam",
1732 		.blocksize = SHA1_BLOCK_SIZE,
1733 		.template_ahash = {
1734 			.init = ahash_init,
1735 			.update = ahash_update,
1736 			.final = ahash_final,
1737 			.finup = ahash_finup,
1738 			.digest = ahash_digest,
1739 			.export = ahash_export,
1740 			.import = ahash_import,
1741 			.setkey = ahash_setkey,
1742 			.halg = {
1743 				.digestsize = SHA1_DIGEST_SIZE,
1744 				.statesize = sizeof(struct caam_export_state),
1745 			},
1746 		},
1747 		.alg_type = OP_ALG_ALGSEL_SHA1,
1748 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1749 	}, {
1750 		.name = "sha224",
1751 		.driver_name = "sha224-caam",
1752 		.hmac_name = "hmac(sha224)",
1753 		.hmac_driver_name = "hmac-sha224-caam",
1754 		.blocksize = SHA224_BLOCK_SIZE,
1755 		.template_ahash = {
1756 			.init = ahash_init,
1757 			.update = ahash_update,
1758 			.final = ahash_final,
1759 			.finup = ahash_finup,
1760 			.digest = ahash_digest,
1761 			.export = ahash_export,
1762 			.import = ahash_import,
1763 			.setkey = ahash_setkey,
1764 			.halg = {
1765 				.digestsize = SHA224_DIGEST_SIZE,
1766 				.statesize = sizeof(struct caam_export_state),
1767 			},
1768 		},
1769 		.alg_type = OP_ALG_ALGSEL_SHA224,
1770 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1771 	}, {
1772 		.name = "sha256",
1773 		.driver_name = "sha256-caam",
1774 		.hmac_name = "hmac(sha256)",
1775 		.hmac_driver_name = "hmac-sha256-caam",
1776 		.blocksize = SHA256_BLOCK_SIZE,
1777 		.template_ahash = {
1778 			.init = ahash_init,
1779 			.update = ahash_update,
1780 			.final = ahash_final,
1781 			.finup = ahash_finup,
1782 			.digest = ahash_digest,
1783 			.export = ahash_export,
1784 			.import = ahash_import,
1785 			.setkey = ahash_setkey,
1786 			.halg = {
1787 				.digestsize = SHA256_DIGEST_SIZE,
1788 				.statesize = sizeof(struct caam_export_state),
1789 			},
1790 		},
1791 		.alg_type = OP_ALG_ALGSEL_SHA256,
1792 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1793 	}, {
1794 		.name = "sha384",
1795 		.driver_name = "sha384-caam",
1796 		.hmac_name = "hmac(sha384)",
1797 		.hmac_driver_name = "hmac-sha384-caam",
1798 		.blocksize = SHA384_BLOCK_SIZE,
1799 		.template_ahash = {
1800 			.init = ahash_init,
1801 			.update = ahash_update,
1802 			.final = ahash_final,
1803 			.finup = ahash_finup,
1804 			.digest = ahash_digest,
1805 			.export = ahash_export,
1806 			.import = ahash_import,
1807 			.setkey = ahash_setkey,
1808 			.halg = {
1809 				.digestsize = SHA384_DIGEST_SIZE,
1810 				.statesize = sizeof(struct caam_export_state),
1811 			},
1812 		},
1813 		.alg_type = OP_ALG_ALGSEL_SHA384,
1814 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1815 	}, {
1816 		.name = "sha512",
1817 		.driver_name = "sha512-caam",
1818 		.hmac_name = "hmac(sha512)",
1819 		.hmac_driver_name = "hmac-sha512-caam",
1820 		.blocksize = SHA512_BLOCK_SIZE,
1821 		.template_ahash = {
1822 			.init = ahash_init,
1823 			.update = ahash_update,
1824 			.final = ahash_final,
1825 			.finup = ahash_finup,
1826 			.digest = ahash_digest,
1827 			.export = ahash_export,
1828 			.import = ahash_import,
1829 			.setkey = ahash_setkey,
1830 			.halg = {
1831 				.digestsize = SHA512_DIGEST_SIZE,
1832 				.statesize = sizeof(struct caam_export_state),
1833 			},
1834 		},
1835 		.alg_type = OP_ALG_ALGSEL_SHA512,
1836 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1837 	}, {
1838 		.name = "md5",
1839 		.driver_name = "md5-caam",
1840 		.hmac_name = "hmac(md5)",
1841 		.hmac_driver_name = "hmac-md5-caam",
1842 		.blocksize = MD5_BLOCK_WORDS * 4,
1843 		.template_ahash = {
1844 			.init = ahash_init,
1845 			.update = ahash_update,
1846 			.final = ahash_final,
1847 			.finup = ahash_finup,
1848 			.digest = ahash_digest,
1849 			.export = ahash_export,
1850 			.import = ahash_import,
1851 			.setkey = ahash_setkey,
1852 			.halg = {
1853 				.digestsize = MD5_DIGEST_SIZE,
1854 				.statesize = sizeof(struct caam_export_state),
1855 			},
1856 		},
1857 		.alg_type = OP_ALG_ALGSEL_MD5,
1858 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1859 	},
1860 };
1861 
1862 struct caam_hash_alg {
1863 	struct list_head entry;
1864 	int alg_type;
1865 	int alg_op;
1866 	struct ahash_alg ahash_alg;
1867 };
1868 
1869 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1870 {
1871 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1872 	struct crypto_alg *base = tfm->__crt_alg;
1873 	struct hash_alg_common *halg =
1874 		 container_of(base, struct hash_alg_common, base);
1875 	struct ahash_alg *alg =
1876 		 container_of(halg, struct ahash_alg, halg);
1877 	struct caam_hash_alg *caam_hash =
1878 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1879 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1880 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512, so
	 * their running digests are the full 32- and 64-byte parent
	 * states.
	 */
1881 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1882 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1883 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1884 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1885 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
1886 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1888 
1889 	/*
1890 	 * Get a Job Ring from the Job Ring driver to ensure in-order
1891 	 * processing of crypto requests for this tfm
1892 	 */
1893 	ctx->jrdev = caam_jr_alloc();
1894 	if (IS_ERR(ctx->jrdev)) {
1895 		pr_err("Job Ring Device allocation for transform failed\n");
1896 		return PTR_ERR(ctx->jrdev);
1897 	}
1898 	/* copy descriptor header template value */
1899 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1900 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1901 
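	/*
	 * The ALGSEL subfield enumerates the MDHA algorithms in the same
	 * order as runninglen[] (MD5 = 0 ... SHA-512 = 5), so it indexes
	 * the table directly.
	 */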
1902 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1903 				  OP_ALG_ALGSEL_SHIFT];
1904 
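	/*
	 * Per-request state (buffers, running context and the
	 * update/finup/final dispatch pointers) lives in the request's
	 * private area, sized here.
	 */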
1905 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1906 				 sizeof(struct caam_hash_state));
1907 
1908 	return ahash_set_sh_desc(ahash);
1911 }
1912 
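/*
 * Shared descriptors are mapped one by one during setup, so some
 * handles may be zero (never mapped) or recorded mapping errors; unmap
 * only the ones that were successfully mapped.
 */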
1913 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1914 {
1915 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1916 
1917 	if (ctx->sh_desc_update_dma &&
1918 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1919 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1920 				 desc_bytes(ctx->sh_desc_update),
1921 				 DMA_TO_DEVICE);
1922 	if (ctx->sh_desc_update_first_dma &&
1923 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1924 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1925 				 desc_bytes(ctx->sh_desc_update_first),
1926 				 DMA_TO_DEVICE);
1927 	if (ctx->sh_desc_fin_dma &&
1928 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1929 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1930 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1931 	if (ctx->sh_desc_digest_dma &&
1932 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1933 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1934 				 desc_bytes(ctx->sh_desc_digest),
1935 				 DMA_TO_DEVICE);
1936 	if (ctx->sh_desc_finup_dma &&
1937 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1938 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1939 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1940 
1941 	caam_jr_free(ctx->jrdev);
1942 }
1943 
1944 static void __exit caam_algapi_hash_exit(void)
1945 {
1946 	struct caam_hash_alg *t_alg, *n;
1947 
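	/*
	 * The static hash_list starts out zeroed; a NULL ->next means
	 * caam_algapi_hash_init() never reached INIT_LIST_HEAD() and
	 * there is nothing to unregister.
	 */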
1948 	if (!hash_list.next)
1949 		return;
1950 
1951 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1952 		crypto_unregister_ahash(&t_alg->ahash_alg);
1953 		list_del(&t_alg->entry);
1954 		kfree(t_alg);
1955 	}
1956 }
1957 
1958 static struct caam_hash_alg *
1959 caam_hash_alloc(struct caam_hash_template *template,
1960 		bool keyed)
1961 {
1962 	struct caam_hash_alg *t_alg;
1963 	struct ahash_alg *halg;
1964 	struct crypto_alg *alg;
1965 
1966 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1967 	if (!t_alg) {
1968 		pr_err("failed to allocate t_alg\n");
1969 		return ERR_PTR(-ENOMEM);
1970 	}
1971 
1972 	t_alg->ahash_alg = template->template_ahash;
1973 	halg = &t_alg->ahash_alg;
1974 	alg = &halg->halg.base;
1975 
1976 	if (keyed) {
1977 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1978 			 template->hmac_name);
1979 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1980 			 template->hmac_driver_name);
1981 	} else {
1982 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1983 			 template->name);
1984 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1985 			 template->driver_name);
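		/* unkeyed digests must not advertise a setkey method */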
1986 		t_alg->ahash_alg.setkey = NULL;
1987 	}
1988 	alg->cra_module = THIS_MODULE;
1989 	alg->cra_init = caam_hash_cra_init;
1990 	alg->cra_exit = caam_hash_cra_exit;
1991 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1992 	alg->cra_priority = CAAM_CRA_PRIORITY;
1993 	alg->cra_blocksize = template->blocksize;
1994 	alg->cra_alignmask = 0;
1995 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1996 	alg->cra_type = &crypto_ahash_type;
1997 
1998 	t_alg->alg_type = template->alg_type;
1999 	t_alg->alg_op = template->alg_op;
2000 
2001 	return t_alg;
2002 }
2003 
2004 static int __init caam_algapi_hash_init(void)
2005 {
2006 	struct device_node *dev_node;
2007 	struct platform_device *pdev;
2008 	struct device *ctrldev;
2009 	int i, err = 0;
2010 	struct caam_drv_private *priv;
2011 	unsigned int md_limit = SHA512_DIGEST_SIZE;
2012 	u32 cha_inst, cha_vid;
2013 
2014 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2015 	if (!dev_node) {
2016 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2017 		if (!dev_node)
2018 			return -ENODEV;
2019 	}
2020 
2021 	pdev = of_find_device_by_node(dev_node);
2022 	if (!pdev) {
2023 		of_node_put(dev_node);
2024 		return -ENODEV;
2025 	}
2026 
2027 	ctrldev = &pdev->dev;
2028 	priv = dev_get_drvdata(ctrldev);
2029 	of_node_put(dev_node);
2030 
2031 	/*
2032 	 * If priv is NULL, it's probably because the caam driver wasn't
2033 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2034 	 */
2035 	if (!priv)
2036 		return -ENODEV;
2037 
2038 	/*
2039 	 * Register the crypto algorithms the device supports. First,
2040 	 * identify the presence and attributes of the MD block.
2041 	 */
2042 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2043 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2044 
2045 	/*
2046 	 * Skip registration of any hashing algorithms if the MD block
2047 	 * is not present.
2048 	 */
2049 	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
2050 		return -ENODEV;
2051 
2052 	/*
	 * A low-power LP256 MD block supports digests only up to SHA-256,
	 * so cap the maximum registered digest size accordingly.
	 */
2053 	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
2054 		md_limit = SHA256_DIGEST_SIZE;
2055 
2056 	INIT_LIST_HEAD(&hash_list);
2057 
2058 	/* register crypto algorithms the device supports */
2059 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
2060 		struct caam_hash_alg *t_alg;
2061 		struct caam_hash_template *alg = driver_hash + i;
2062 
2063 		/* If MD size is not supported by device, skip registration */
2064 		if (alg->template_ahash.halg.digestsize > md_limit)
2065 			continue;
2066 
2067 		/* register hmac version */
2068 		t_alg = caam_hash_alloc(alg, true);
2069 		if (IS_ERR(t_alg)) {
2070 			err = PTR_ERR(t_alg);
2071 			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
2072 			continue;
2073 		}
2074 
2075 		err = crypto_register_ahash(&t_alg->ahash_alg);
2076 		if (err) {
2077 			pr_warn("%s alg registration failed: %d\n",
2078 				t_alg->ahash_alg.halg.base.cra_driver_name,
2079 				err);
2080 			kfree(t_alg);
2081 		} else
2082 			list_add_tail(&t_alg->entry, &hash_list);
2083 
2084 		/* register unkeyed version */
2085 		t_alg = caam_hash_alloc(alg, false);
2086 		if (IS_ERR(t_alg)) {
2087 			err = PTR_ERR(t_alg);
2088 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2089 			continue;
2090 		}
2091 
2092 		err = crypto_register_ahash(&t_alg->ahash_alg);
2093 		if (err) {
2094 			pr_warn("%s alg registration failed: %d\n",
2095 				t_alg->ahash_alg.halg.base.cra_driver_name,
2096 				err);
2097 			kfree(t_alg);
2098 		} else {
2099 			list_add_tail(&t_alg->entry, &hash_list);
		}
2100 	}
2101 
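	/*
	 * err reflects only the last registration attempt; algorithms
	 * registered earlier stay available on hash_list either way.
	 */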
2102 	return err;
2103 }
2104 
2105 module_init(caam_algapi_hash_init);
2106 module_exit(caam_algapi_hash_exit);
2107 
2108 MODULE_LICENSE("GPL");
2109 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2110 MODULE_AUTHOR("Freescale Semiconductor - NMG");
2111