xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision 3932b9ca)
1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
55 
56 #include "compat.h"
57 
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65 
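/*
 * Illustrative sketch only, not used by the driver: how the job
 * descriptor layout pictured in the header comment maps onto the
 * desc_constr.h helpers used throughout this file. All arguments are
 * hypothetical placeholders supplied by the caller.
 */
static inline void example_build_ahash_job_desc(u32 *desc,
						dma_addr_t sh_desc_dma,
						int sh_len,
						dma_addr_t dst_dma, int dst_len,
						dma_addr_t src_dma, int src_len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, sh_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}
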
66 #define CAAM_CRA_PRIORITY		3000
67 
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
70 
71 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
73 
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81 
82 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
83 					 CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
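
/*
 * Worked example, assuming CAAM_CMD_SZ == sizeof(u32) == 4: the largest
 * descriptor is final/finup plus an immediate split key, i.e.
 * (4 + 5) * 4 + 2 * SHA512_DIGEST_SIZE = 36 + 128 = 164 bytes, so each
 * sh_desc_* buffer below is DESC_HASH_MAX_USED_LEN = 41 words long.
 */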
85 
/* caam context sizes for hashes: running digest + 8-byte message length */
87 #define HASH_MSG_LEN			8
88 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89 
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96 
97 
98 static struct list_head hash_list;
99 
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102 	struct device *jrdev;
103 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107 	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108 	dma_addr_t sh_desc_update_dma;
109 	dma_addr_t sh_desc_update_first_dma;
110 	dma_addr_t sh_desc_fin_dma;
111 	dma_addr_t sh_desc_digest_dma;
112 	dma_addr_t sh_desc_finup_dma;
113 	u32 alg_type;
114 	u32 alg_op;
115 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
116 	dma_addr_t key_dma;
117 	int ctx_len;
118 	unsigned int split_key_len;
119 	unsigned int split_key_pad_len;
120 };
121 
122 /* ahash state */
123 struct caam_hash_state {
124 	dma_addr_t buf_dma;
125 	dma_addr_t ctx_dma;
126 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127 	int buflen_0;
128 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129 	int buflen_1;
130 	u8 caam_ctx[MAX_CTX_LEN];
131 	int (*update)(struct ahash_request *req);
132 	int (*final)(struct ahash_request *req);
133 	int (*finup)(struct ahash_request *req);
134 	int current_buf;
135 };
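
/*
 * Illustrative sketch only, not used below: the update paths ping-pong
 * between buf_0 and buf_1 so one buffer can hold the partial block for
 * the job in flight while the other collects new input. These
 * hypothetical helpers restate the open-coded ternaries in those paths.
 */
static inline u8 *example_current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *example_alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}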
136 
137 /* Common job descriptor seq in/out ptr routines */
138 
139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141 				      struct caam_hash_state *state,
142 				      int ctx_len)
143 {
144 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145 					ctx_len, DMA_FROM_DEVICE);
146 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
147 		dev_err(jrdev, "unable to map ctx\n");
148 		return -ENOMEM;
149 	}
150 
151 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
152 
153 	return 0;
154 }
155 
156 /* Map req->result, and append seq_out_ptr command that points to it */
157 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
158 						u8 *result, int digestsize)
159 {
160 	dma_addr_t dst_dma;
161 
162 	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
163 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
164 
165 	return dst_dma;
166 }
167 
168 /* Map current buffer in state and put it in link table */
169 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
170 					    struct sec4_sg_entry *sec4_sg,
171 					    u8 *buf, int buflen)
172 {
173 	dma_addr_t buf_dma;
174 
175 	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
176 	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
177 
178 	return buf_dma;
179 }
180 
181 /* Map req->src and put it in link table */
182 static inline void src_map_to_sec4_sg(struct device *jrdev,
183 				      struct scatterlist *src, int src_nents,
184 				      struct sec4_sg_entry *sec4_sg,
185 				      bool chained)
186 {
187 	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
188 	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
189 }
190 
/*
 * Only put the buffer in the link table if it contains data. Even an
 * empty buffer may have been mapped on a previous pass and so still
 * needs to be unmapped first.
 */
195 static inline dma_addr_t
196 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
197 		       u8 *buf, dma_addr_t buf_dma, int buflen,
198 		       int last_buflen)
199 {
200 	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
201 		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
202 	if (buflen)
203 		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
204 	else
205 		buf_dma = 0;
206 
207 	return buf_dma;
208 }
209 
210 /* Map state->caam_ctx, and add it to link table */
211 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
212 				     struct caam_hash_state *state, int ctx_len,
213 				     struct sec4_sg_entry *sec4_sg, u32 flag)
214 {
215 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
216 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
217 		dev_err(jrdev, "unable to map ctx\n");
218 		return -ENOMEM;
219 	}
220 
221 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
222 
223 	return 0;
224 }
225 
226 /* Common shared descriptor commands */
227 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
228 {
229 	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
230 			  ctx->split_key_len, CLASS_2 |
231 			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
232 }
233 
234 /* Append key if it has been set */
235 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
236 {
237 	u32 *key_jump_cmd;
238 
239 	init_sh_desc(desc, HDR_SHARE_SERIAL);
240 
241 	if (ctx->split_key_len) {
242 		/* Skip if already shared */
243 		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
244 					   JUMP_COND_SHRD);
245 
246 		append_key_ahash(desc, ctx);
247 
248 		set_jump_tgt_here(desc, key_jump_cmd);
249 	}
250 
251 	/* Propagate errors from shared to job descriptor */
252 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
253 }
254 
255 /*
256  * For ahash read data from seqin following state->caam_ctx,
257  * and write resulting class2 context to seqout, which may be state->caam_ctx
258  * or req->result
259  */
260 static inline void ahash_append_load_str(u32 *desc, int digestsize)
261 {
262 	/* Calculate remaining bytes to read */
263 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
264 
265 	/* Read remaining bytes */
266 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
267 			     FIFOLD_TYPE_MSG | KEY_VLF);
268 
269 	/* Store class2 context bytes */
270 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
271 			 LDST_SRCDST_BYTE_CONTEXT);
272 }
273 
274 /*
275  * For ahash update, final and finup, import context, read and write to seqout
276  */
277 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
278 					 int digestsize,
279 					 struct caam_hash_ctx *ctx)
280 {
281 	init_sh_desc_key_ahash(desc, ctx);
282 
283 	/* Import context from software */
284 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
285 		   LDST_CLASS_2_CCB | ctx->ctx_len);
286 
287 	/* Class 2 operation */
288 	append_operation(desc, op | state | OP_ALG_ENCRYPT);
289 
290 	/*
291 	 * Load from buf and/or src and write to req->result or state->context
292 	 */
293 	ahash_append_load_str(desc, digestsize);
294 }
295 
/* For ahash first update and digest: read from seqin, write to seqout */
297 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
298 				     int digestsize, struct caam_hash_ctx *ctx)
299 {
300 	init_sh_desc_key_ahash(desc, ctx);
301 
302 	/* Class 2 operation */
303 	append_operation(desc, op | state | OP_ALG_ENCRYPT);
304 
305 	/*
306 	 * Load from buf and/or src and write to req->result or state->context
307 	 */
308 	ahash_append_load_str(desc, digestsize);
309 }
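
/*
 * ahash_data_to_out() above differs from ahash_ctx_data_to_out() only
 * in skipping the SEQ LOAD of a saved class 2 context: it serves the
 * init (update_first) and digest descriptors, which start from a fresh
 * state, while update/final/finup continue from state->caam_ctx.
 */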
310 
311 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
312 {
313 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
314 	int digestsize = crypto_ahash_digestsize(ahash);
315 	struct device *jrdev = ctx->jrdev;
316 	u32 have_key = 0;
317 	u32 *desc;
318 
319 	if (ctx->split_key_len)
320 		have_key = OP_ALG_AAI_HMAC_PRECOMP;
321 
322 	/* ahash_update shared descriptor */
323 	desc = ctx->sh_desc_update;
324 
325 	init_sh_desc(desc, HDR_SHARE_SERIAL);
326 
327 	/* Import context from software */
328 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
329 		   LDST_CLASS_2_CCB | ctx->ctx_len);
330 
331 	/* Class 2 operation */
332 	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
333 			 OP_ALG_ENCRYPT);
334 
335 	/* Load data and write to result or context */
336 	ahash_append_load_str(desc, ctx->ctx_len);
337 
338 	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
339 						 DMA_TO_DEVICE);
340 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
341 		dev_err(jrdev, "unable to map shared descriptor\n");
342 		return -ENOMEM;
343 	}
344 #ifdef DEBUG
345 	print_hex_dump(KERN_ERR,
346 		       "ahash update shdesc@"__stringify(__LINE__)": ",
347 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
348 #endif
349 
350 	/* ahash_update_first shared descriptor */
351 	desc = ctx->sh_desc_update_first;
352 
353 	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
354 			  ctx->ctx_len, ctx);
355 
356 	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
357 						       desc_bytes(desc),
358 						       DMA_TO_DEVICE);
359 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
360 		dev_err(jrdev, "unable to map shared descriptor\n");
361 		return -ENOMEM;
362 	}
363 #ifdef DEBUG
364 	print_hex_dump(KERN_ERR,
365 		       "ahash update first shdesc@"__stringify(__LINE__)": ",
366 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
367 #endif
368 
369 	/* ahash_final shared descriptor */
370 	desc = ctx->sh_desc_fin;
371 
372 	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
373 			      OP_ALG_AS_FINALIZE, digestsize, ctx);
374 
375 	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
376 					      DMA_TO_DEVICE);
377 	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
378 		dev_err(jrdev, "unable to map shared descriptor\n");
379 		return -ENOMEM;
380 	}
381 #ifdef DEBUG
382 	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
383 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
384 		       desc_bytes(desc), 1);
385 #endif
386 
387 	/* ahash_finup shared descriptor */
388 	desc = ctx->sh_desc_finup;
389 
390 	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
391 			      OP_ALG_AS_FINALIZE, digestsize, ctx);
392 
393 	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
394 						DMA_TO_DEVICE);
395 	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
396 		dev_err(jrdev, "unable to map shared descriptor\n");
397 		return -ENOMEM;
398 	}
399 #ifdef DEBUG
400 	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
401 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
402 		       desc_bytes(desc), 1);
403 #endif
404 
405 	/* ahash_digest shared descriptor */
406 	desc = ctx->sh_desc_digest;
407 
408 	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
409 			  digestsize, ctx);
410 
411 	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
412 						 desc_bytes(desc),
413 						 DMA_TO_DEVICE);
414 	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
415 		dev_err(jrdev, "unable to map shared descriptor\n");
416 		return -ENOMEM;
417 	}
418 #ifdef DEBUG
419 	print_hex_dump(KERN_ERR,
420 		       "ahash digest shdesc@"__stringify(__LINE__)": ",
421 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
422 		       desc_bytes(desc), 1);
423 #endif
424 
425 	return 0;
426 }
427 
428 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
429 			      u32 keylen)
430 {
431 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
432 			       ctx->split_key_pad_len, key_in, keylen,
433 			       ctx->alg_op);
434 }
435 
/* Hash the key down to digestsize bytes if it is too long */
437 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
438 			   u32 *keylen, u8 *key_out, u32 digestsize)
439 {
440 	struct device *jrdev = ctx->jrdev;
441 	u32 *desc;
442 	struct split_key_result result;
443 	dma_addr_t src_dma, dst_dma;
444 	int ret = 0;
445 
446 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
447 	if (!desc) {
448 		dev_err(jrdev, "unable to allocate key input memory\n");
449 		return -ENOMEM;
450 	}
451 
452 	init_job_desc(desc, 0);
453 
454 	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
455 				 DMA_TO_DEVICE);
456 	if (dma_mapping_error(jrdev, src_dma)) {
457 		dev_err(jrdev, "unable to map key input memory\n");
458 		kfree(desc);
459 		return -ENOMEM;
460 	}
461 	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
462 				 DMA_FROM_DEVICE);
463 	if (dma_mapping_error(jrdev, dst_dma)) {
464 		dev_err(jrdev, "unable to map key output memory\n");
465 		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
466 		kfree(desc);
467 		return -ENOMEM;
468 	}
469 
470 	/* Job descriptor to perform unkeyed hash on key_in */
471 	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
472 			 OP_ALG_AS_INITFINAL);
473 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
474 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
475 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
476 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
477 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
478 			 LDST_SRCDST_BYTE_CONTEXT);
479 
480 #ifdef DEBUG
481 	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
482 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
483 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
484 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
485 #endif
486 
487 	result.err = 0;
488 	init_completion(&result.completion);
489 
490 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
491 	if (!ret) {
492 		/* in progress */
493 		wait_for_completion_interruptible(&result.completion);
494 		ret = result.err;
495 #ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
500 #endif
501 	}
502 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
503 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
504 
505 	*keylen = digestsize;
506 
507 	kfree(desc);
508 
509 	return ret;
510 }
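
/*
 * hash_digest_key() above mirrors the standard HMAC rule (RFC 2104):
 * a key longer than the digest input block is first hashed, and its
 * digest is used as the key.
 */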
511 
512 static int ahash_setkey(struct crypto_ahash *ahash,
513 			const u8 *key, unsigned int keylen)
514 {
515 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
516 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
517 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
518 	struct device *jrdev = ctx->jrdev;
519 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
520 	int digestsize = crypto_ahash_digestsize(ahash);
521 	int ret = 0;
522 	u8 *hashed_key = NULL;
523 
524 #ifdef DEBUG
525 	printk(KERN_ERR "keylen %d\n", keylen);
526 #endif
527 
528 	if (keylen > blocksize) {
529 		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
530 				     GFP_DMA);
531 		if (!hashed_key)
532 			return -ENOMEM;
533 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
534 				      digestsize);
535 		if (ret)
536 			goto badkey;
537 		key = hashed_key;
538 	}
539 
540 	/* Pick class 2 key length from algorithm submask */
541 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
542 				      OP_ALG_ALGSEL_SHIFT] * 2;
543 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
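	/*
	 * Worked example for hmac(sha256): mdpadlen[] gives 32, so the
	 * split key (the ipad and opad MDHA states) is 2 * 32 = 64
	 * bytes, already a multiple of 16, so split_key_pad_len == 64.
	 */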
544 
545 #ifdef DEBUG
546 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
547 	       ctx->split_key_len, ctx->split_key_pad_len);
548 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
549 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
550 #endif
551 
552 	ret = gen_split_hash_key(ctx, key, keylen);
553 	if (ret)
554 		goto badkey;
555 
556 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
557 				      DMA_TO_DEVICE);
558 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
559 		dev_err(jrdev, "unable to map key i/o memory\n");
560 		ret = -ENOMEM;
561 		goto map_err;
562 	}
563 #ifdef DEBUG
564 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
565 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
566 		       ctx->split_key_pad_len, 1);
567 #endif
568 
569 	ret = ahash_set_sh_desc(ahash);
570 	if (ret) {
571 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
572 				 DMA_TO_DEVICE);
573 	}
574 
575 map_err:
576 	kfree(hashed_key);
577 	return ret;
578 badkey:
579 	kfree(hashed_key);
580 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
581 	return -EINVAL;
582 }
583 
584 /*
585  * ahash_edesc - s/w-extended ahash descriptor
586  * @dst_dma: physical mapped address of req->result
587  * @sec4_sg_dma: physical mapped address of h/w link table
588  * @chained: if source is chained
589  * @src_nents: number of segments in input scatterlist
590  * @sec4_sg_bytes: length of dma mapped sec4_sg space
591  * @sec4_sg: pointer to h/w link table
592  * @hw_desc: the h/w job descriptor followed by any referenced link tables
593  */
594 struct ahash_edesc {
595 	dma_addr_t dst_dma;
596 	dma_addr_t sec4_sg_dma;
597 	bool chained;
598 	int src_nents;
599 	int sec4_sg_bytes;
600 	struct sec4_sg_entry *sec4_sg;
601 	u32 hw_desc[0];
602 };
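
/*
 * The request paths below allocate the edesc, the h/w job descriptor
 * area and the sec4_sg link table with a single kmalloc, laid out back
 * to back:
 *
 *   [struct ahash_edesc][hw_desc: DESC_JOB_IO_LEN bytes][sec4_sg entries]
 *
 * hence sec4_sg is set to (void *)edesc + sizeof(struct ahash_edesc) +
 * DESC_JOB_IO_LEN after each allocation.
 */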
603 
604 static inline void ahash_unmap(struct device *dev,
605 			struct ahash_edesc *edesc,
606 			struct ahash_request *req, int dst_len)
607 {
608 	if (edesc->src_nents)
609 		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
610 				     DMA_TO_DEVICE, edesc->chained);
611 	if (edesc->dst_dma)
612 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
613 
614 	if (edesc->sec4_sg_bytes)
615 		dma_unmap_single(dev, edesc->sec4_sg_dma,
616 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
617 }
618 
619 static inline void ahash_unmap_ctx(struct device *dev,
620 			struct ahash_edesc *edesc,
621 			struct ahash_request *req, int dst_len, u32 flag)
622 {
623 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
624 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
625 	struct caam_hash_state *state = ahash_request_ctx(req);
626 
627 	if (state->ctx_dma)
628 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
629 	ahash_unmap(dev, edesc, req, dst_len);
630 }
631 
632 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
633 		       void *context)
634 {
635 	struct ahash_request *req = context;
636 	struct ahash_edesc *edesc;
637 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
638 	int digestsize = crypto_ahash_digestsize(ahash);
639 #ifdef DEBUG
640 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
641 	struct caam_hash_state *state = ahash_request_ctx(req);
642 
643 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
644 #endif
645 
646 	edesc = (struct ahash_edesc *)((char *)desc -
647 		 offsetof(struct ahash_edesc, hw_desc));
648 	if (err)
649 		caam_jr_strstatus(jrdev, err);
650 
651 	ahash_unmap(jrdev, edesc, req, digestsize);
652 	kfree(edesc);
653 
654 #ifdef DEBUG
655 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
656 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
657 		       ctx->ctx_len, 1);
658 	if (req->result)
659 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
660 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
661 			       digestsize, 1);
662 #endif
663 
664 	req->base.complete(&req->base, err);
665 }
666 
667 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
668 			    void *context)
669 {
670 	struct ahash_request *req = context;
671 	struct ahash_edesc *edesc;
672 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
673 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
674 #ifdef DEBUG
675 	struct caam_hash_state *state = ahash_request_ctx(req);
676 	int digestsize = crypto_ahash_digestsize(ahash);
677 
678 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
679 #endif
680 
681 	edesc = (struct ahash_edesc *)((char *)desc -
682 		 offsetof(struct ahash_edesc, hw_desc));
683 	if (err)
684 		caam_jr_strstatus(jrdev, err);
685 
686 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
687 	kfree(edesc);
688 
689 #ifdef DEBUG
690 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
691 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
692 		       ctx->ctx_len, 1);
693 	if (req->result)
694 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
695 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
696 			       digestsize, 1);
697 #endif
698 
699 	req->base.complete(&req->base, err);
700 }
701 
702 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
703 			       void *context)
704 {
705 	struct ahash_request *req = context;
706 	struct ahash_edesc *edesc;
707 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
708 	int digestsize = crypto_ahash_digestsize(ahash);
709 #ifdef DEBUG
710 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
711 	struct caam_hash_state *state = ahash_request_ctx(req);
712 
713 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
714 #endif
715 
716 	edesc = (struct ahash_edesc *)((char *)desc -
717 		 offsetof(struct ahash_edesc, hw_desc));
718 	if (err)
719 		caam_jr_strstatus(jrdev, err);
720 
721 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
722 	kfree(edesc);
723 
724 #ifdef DEBUG
725 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
726 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
727 		       ctx->ctx_len, 1);
728 	if (req->result)
729 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
730 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
731 			       digestsize, 1);
732 #endif
733 
734 	req->base.complete(&req->base, err);
735 }
736 
737 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
738 			       void *context)
739 {
740 	struct ahash_request *req = context;
741 	struct ahash_edesc *edesc;
742 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
743 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
744 #ifdef DEBUG
745 	struct caam_hash_state *state = ahash_request_ctx(req);
746 	int digestsize = crypto_ahash_digestsize(ahash);
747 
748 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
749 #endif
750 
751 	edesc = (struct ahash_edesc *)((char *)desc -
752 		 offsetof(struct ahash_edesc, hw_desc));
753 	if (err)
754 		caam_jr_strstatus(jrdev, err);
755 
756 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
757 	kfree(edesc);
758 
759 #ifdef DEBUG
760 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
761 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
762 		       ctx->ctx_len, 1);
763 	if (req->result)
764 		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
765 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
766 			       digestsize, 1);
767 #endif
768 
769 	req->base.complete(&req->base, err);
770 }
771 
772 /* submit update job descriptor */
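/*
 * Worked example of the buffering math below, assuming a 64-byte block
 * size: with 10 bytes already buffered and req->nbytes == 100, in_len
 * is 110, *next_buflen = 110 & 63 = 46 bytes are carried over to the
 * next call, and to_hash = 64 bytes go to the CAAM in this job.
 */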
773 static int ahash_update_ctx(struct ahash_request *req)
774 {
775 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
776 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
777 	struct caam_hash_state *state = ahash_request_ctx(req);
778 	struct device *jrdev = ctx->jrdev;
779 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
780 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
781 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
782 	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
783 	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int last_buflen;
786 	int in_len = *buflen + req->nbytes, to_hash;
787 	u32 *sh_desc = ctx->sh_desc_update, *desc;
788 	dma_addr_t ptr = ctx->sh_desc_update_dma;
789 	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
790 	struct ahash_edesc *edesc;
791 	bool chained = false;
792 	int ret = 0;
793 	int sh_len;
794 
795 	last_buflen = *next_buflen;
796 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
797 	to_hash = in_len - *next_buflen;
798 
799 	if (to_hash) {
800 		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
801 				       &chained);
802 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
803 		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
804 				 sizeof(struct sec4_sg_entry);
805 
806 		/*
807 		 * allocate space for base edesc and hw desc commands,
808 		 * link tables
809 		 */
810 		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
811 				sec4_sg_bytes, GFP_DMA | flags);
812 		if (!edesc) {
813 			dev_err(jrdev,
814 				"could not allocate extended descriptor\n");
815 			return -ENOMEM;
816 		}
817 
818 		edesc->src_nents = src_nents;
819 		edesc->chained = chained;
820 		edesc->sec4_sg_bytes = sec4_sg_bytes;
821 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
822 				 DESC_JOB_IO_LEN;
823 
824 		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
825 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
826 		if (ret)
827 			return ret;
828 
829 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
830 							edesc->sec4_sg + 1,
831 							buf, state->buf_dma,
832 							*buflen, last_buflen);
833 
834 		if (src_nents) {
835 			src_map_to_sec4_sg(jrdev, req->src, src_nents,
836 					   edesc->sec4_sg + sec4_sg_src_index,
837 					   chained);
838 			if (*next_buflen) {
839 				sg_copy_part(next_buf, req->src, to_hash -
840 					     *buflen, req->nbytes);
841 				state->current_buf = !state->current_buf;
842 			}
843 		} else {
844 			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
845 							SEC4_SG_LEN_FIN;
846 		}
847 
848 		sh_len = desc_len(sh_desc);
849 		desc = edesc->hw_desc;
850 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
851 				     HDR_REVERSE);
852 
853 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
854 						     sec4_sg_bytes,
855 						     DMA_TO_DEVICE);
856 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
857 			dev_err(jrdev, "unable to map S/G table\n");
858 			return -ENOMEM;
859 		}
860 
861 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
862 				       to_hash, LDST_SGF);
863 
864 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
865 
866 #ifdef DEBUG
867 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
868 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
869 			       desc_bytes(desc), 1);
870 #endif
871 
872 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
873 		if (!ret) {
874 			ret = -EINPROGRESS;
875 		} else {
876 			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
877 					   DMA_BIDIRECTIONAL);
878 			kfree(edesc);
879 		}
880 	} else if (*next_buflen) {
881 		sg_copy(buf + *buflen, req->src, req->nbytes);
882 		*buflen = *next_buflen;
883 		*next_buflen = last_buflen;
884 	}
885 #ifdef DEBUG
886 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
887 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
888 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
889 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
890 		       *next_buflen, 1);
891 #endif
892 
893 	return ret;
894 }
895 
896 static int ahash_final_ctx(struct ahash_request *req)
897 {
898 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
899 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
900 	struct caam_hash_state *state = ahash_request_ctx(req);
901 	struct device *jrdev = ctx->jrdev;
902 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
903 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
904 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
905 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
906 	int last_buflen = state->current_buf ? state->buflen_0 :
907 			  state->buflen_1;
908 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
909 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
910 	int sec4_sg_bytes;
911 	int digestsize = crypto_ahash_digestsize(ahash);
912 	struct ahash_edesc *edesc;
913 	int ret = 0;
914 	int sh_len;
915 
916 	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
917 
918 	/* allocate space for base edesc and hw desc commands, link tables */
919 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
920 			sec4_sg_bytes, GFP_DMA | flags);
921 	if (!edesc) {
922 		dev_err(jrdev, "could not allocate extended descriptor\n");
923 		return -ENOMEM;
924 	}
925 
926 	sh_len = desc_len(sh_desc);
927 	desc = edesc->hw_desc;
928 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
929 
930 	edesc->sec4_sg_bytes = sec4_sg_bytes;
931 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
932 			 DESC_JOB_IO_LEN;
933 	edesc->src_nents = 0;
934 
935 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
936 				 edesc->sec4_sg, DMA_TO_DEVICE);
937 	if (ret)
938 		return ret;
939 
940 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
941 						buf, state->buf_dma, buflen,
942 						last_buflen);
943 	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
944 
945 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
946 					    sec4_sg_bytes, DMA_TO_DEVICE);
947 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
948 		dev_err(jrdev, "unable to map S/G table\n");
949 		return -ENOMEM;
950 	}
951 
952 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
953 			  LDST_SGF);
954 
955 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
956 						digestsize);
957 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
958 		dev_err(jrdev, "unable to map dst\n");
959 		return -ENOMEM;
960 	}
961 
962 #ifdef DEBUG
963 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
964 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
965 #endif
966 
967 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
968 	if (!ret) {
969 		ret = -EINPROGRESS;
970 	} else {
971 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
972 		kfree(edesc);
973 	}
974 
975 	return ret;
976 }
977 
978 static int ahash_finup_ctx(struct ahash_request *req)
979 {
980 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
981 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
982 	struct caam_hash_state *state = ahash_request_ctx(req);
983 	struct device *jrdev = ctx->jrdev;
984 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
985 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
986 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
987 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
988 	int last_buflen = state->current_buf ? state->buflen_0 :
989 			  state->buflen_1;
990 	u32 *sh_desc = ctx->sh_desc_finup, *desc;
991 	dma_addr_t ptr = ctx->sh_desc_finup_dma;
992 	int sec4_sg_bytes, sec4_sg_src_index;
993 	int src_nents;
994 	int digestsize = crypto_ahash_digestsize(ahash);
995 	struct ahash_edesc *edesc;
996 	bool chained = false;
997 	int ret = 0;
998 	int sh_len;
999 
1000 	src_nents = __sg_count(req->src, req->nbytes, &chained);
1001 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1002 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1003 			 sizeof(struct sec4_sg_entry);
1004 
1005 	/* allocate space for base edesc and hw desc commands, link tables */
1006 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1007 			sec4_sg_bytes, GFP_DMA | flags);
1008 	if (!edesc) {
1009 		dev_err(jrdev, "could not allocate extended descriptor\n");
1010 		return -ENOMEM;
1011 	}
1012 
1013 	sh_len = desc_len(sh_desc);
1014 	desc = edesc->hw_desc;
1015 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1016 
1017 	edesc->src_nents = src_nents;
1018 	edesc->chained = chained;
1019 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1020 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1021 			 DESC_JOB_IO_LEN;
1022 
1023 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
1024 				 edesc->sec4_sg, DMA_TO_DEVICE);
1025 	if (ret)
1026 		return ret;
1027 
1028 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1029 						buf, state->buf_dma, buflen,
1030 						last_buflen);
1031 
1032 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1033 			   sec4_sg_src_index, chained);
1034 
1035 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1036 					    sec4_sg_bytes, DMA_TO_DEVICE);
1037 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1038 		dev_err(jrdev, "unable to map S/G table\n");
1039 		return -ENOMEM;
1040 	}
1041 
1042 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1043 			       buflen + req->nbytes, LDST_SGF);
1044 
1045 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1046 						digestsize);
1047 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1048 		dev_err(jrdev, "unable to map dst\n");
1049 		return -ENOMEM;
1050 	}
1051 
1052 #ifdef DEBUG
1053 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1054 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1055 #endif
1056 
1057 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1058 	if (!ret) {
1059 		ret = -EINPROGRESS;
1060 	} else {
1061 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1062 		kfree(edesc);
1063 	}
1064 
1065 	return ret;
1066 }
1067 
1068 static int ahash_digest(struct ahash_request *req)
1069 {
1070 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1071 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1072 	struct device *jrdev = ctx->jrdev;
1073 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1074 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1075 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1076 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1077 	int digestsize = crypto_ahash_digestsize(ahash);
1078 	int src_nents, sec4_sg_bytes;
1079 	dma_addr_t src_dma;
1080 	struct ahash_edesc *edesc;
1081 	bool chained = false;
1082 	int ret = 0;
1083 	u32 options;
1084 	int sh_len;
1085 
1086 	src_nents = sg_count(req->src, req->nbytes, &chained);
1087 	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1088 			   chained);
1089 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1090 
1091 	/* allocate space for base edesc and hw desc commands, link tables */
1092 	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1093 			DESC_JOB_IO_LEN, GFP_DMA | flags);
1094 	if (!edesc) {
1095 		dev_err(jrdev, "could not allocate extended descriptor\n");
1096 		return -ENOMEM;
1097 	}
1098 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1099 			  DESC_JOB_IO_LEN;
1100 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1101 	edesc->src_nents = src_nents;
1102 	edesc->chained = chained;
1103 
1104 	sh_len = desc_len(sh_desc);
1105 	desc = edesc->hw_desc;
1106 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1107 
1108 	if (src_nents) {
1109 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1110 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1111 					    sec4_sg_bytes, DMA_TO_DEVICE);
1112 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1113 			dev_err(jrdev, "unable to map S/G table\n");
1114 			return -ENOMEM;
1115 		}
1116 		src_dma = edesc->sec4_sg_dma;
1117 		options = LDST_SGF;
1118 	} else {
1119 		src_dma = sg_dma_address(req->src);
1120 		options = 0;
1121 	}
1122 	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1123 
1124 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1125 						digestsize);
1126 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1127 		dev_err(jrdev, "unable to map dst\n");
1128 		return -ENOMEM;
1129 	}
1130 
1131 #ifdef DEBUG
1132 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1133 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1134 #endif
1135 
1136 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1137 	if (!ret) {
1138 		ret = -EINPROGRESS;
1139 	} else {
1140 		ahash_unmap(jrdev, edesc, req, digestsize);
1141 		kfree(edesc);
1142 	}
1143 
1144 	return ret;
1145 }
1146 
/* submit ahash final if it is the first job descriptor */
1148 static int ahash_final_no_ctx(struct ahash_request *req)
1149 {
1150 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1151 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1152 	struct caam_hash_state *state = ahash_request_ctx(req);
1153 	struct device *jrdev = ctx->jrdev;
1154 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1155 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1156 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1157 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1158 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1159 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1160 	int digestsize = crypto_ahash_digestsize(ahash);
1161 	struct ahash_edesc *edesc;
1162 	int ret = 0;
1163 	int sh_len;
1164 
1165 	/* allocate space for base edesc and hw desc commands, link tables */
1166 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1167 			GFP_DMA | flags);
1168 	if (!edesc) {
1169 		dev_err(jrdev, "could not allocate extended descriptor\n");
1170 		return -ENOMEM;
1171 	}
1172 
1173 	sh_len = desc_len(sh_desc);
1174 	desc = edesc->hw_desc;
1175 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1176 
1177 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1178 	if (dma_mapping_error(jrdev, state->buf_dma)) {
1179 		dev_err(jrdev, "unable to map src\n");
1180 		return -ENOMEM;
1181 	}
1182 
1183 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1184 
1185 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1186 						digestsize);
1187 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1188 		dev_err(jrdev, "unable to map dst\n");
1189 		return -ENOMEM;
1190 	}
1191 	edesc->src_nents = 0;
1192 
1193 #ifdef DEBUG
1194 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1195 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1196 #endif
1197 
1198 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1199 	if (!ret) {
1200 		ret = -EINPROGRESS;
1201 	} else {
1202 		ahash_unmap(jrdev, edesc, req, digestsize);
1203 		kfree(edesc);
1204 	}
1205 
1206 	return ret;
1207 }
1208 
/* submit ahash update if it is the first job descriptor after update */
1210 static int ahash_update_no_ctx(struct ahash_request *req)
1211 {
1212 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1213 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1214 	struct caam_hash_state *state = ahash_request_ctx(req);
1215 	struct device *jrdev = ctx->jrdev;
1216 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1217 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1218 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1219 	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1220 	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1221 	int *next_buflen = state->current_buf ? &state->buflen_0 :
1222 			   &state->buflen_1;
1223 	int in_len = *buflen + req->nbytes, to_hash;
1224 	int sec4_sg_bytes, src_nents;
1225 	struct ahash_edesc *edesc;
1226 	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1227 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1228 	bool chained = false;
1229 	int ret = 0;
1230 	int sh_len;
1231 
1232 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1233 	to_hash = in_len - *next_buflen;
1234 
1235 	if (to_hash) {
1236 		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1237 				       &chained);
1238 		sec4_sg_bytes = (1 + src_nents) *
1239 				sizeof(struct sec4_sg_entry);
1240 
1241 		/*
1242 		 * allocate space for base edesc and hw desc commands,
1243 		 * link tables
1244 		 */
1245 		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1246 				sec4_sg_bytes, GFP_DMA | flags);
1247 		if (!edesc) {
1248 			dev_err(jrdev,
1249 				"could not allocate extended descriptor\n");
1250 			return -ENOMEM;
1251 		}
1252 
1253 		edesc->src_nents = src_nents;
1254 		edesc->chained = chained;
1255 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1256 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1257 				 DESC_JOB_IO_LEN;
1258 		edesc->dst_dma = 0;
1259 
1260 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1261 						    buf, *buflen);
1262 		src_map_to_sec4_sg(jrdev, req->src, src_nents,
1263 				   edesc->sec4_sg + 1, chained);
1264 		if (*next_buflen) {
1265 			sg_copy_part(next_buf, req->src, to_hash - *buflen,
1266 				    req->nbytes);
1267 			state->current_buf = !state->current_buf;
1268 		}
1269 
1270 		sh_len = desc_len(sh_desc);
1271 		desc = edesc->hw_desc;
1272 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1273 				     HDR_REVERSE);
1274 
1275 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1276 						    sec4_sg_bytes,
1277 						    DMA_TO_DEVICE);
1278 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1279 			dev_err(jrdev, "unable to map S/G table\n");
1280 			return -ENOMEM;
1281 		}
1282 
1283 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1284 
1285 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1286 		if (ret)
1287 			return ret;
1288 
1289 #ifdef DEBUG
1290 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1291 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1292 			       desc_bytes(desc), 1);
1293 #endif
1294 
1295 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1296 		if (!ret) {
1297 			ret = -EINPROGRESS;
1298 			state->update = ahash_update_ctx;
1299 			state->finup = ahash_finup_ctx;
1300 			state->final = ahash_final_ctx;
1301 		} else {
1302 			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1303 					DMA_TO_DEVICE);
1304 			kfree(edesc);
1305 		}
1306 	} else if (*next_buflen) {
1307 		sg_copy(buf + *buflen, req->src, req->nbytes);
1308 		*buflen = *next_buflen;
1309 		*next_buflen = 0;
1310 	}
1311 #ifdef DEBUG
1312 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1313 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1314 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1315 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1316 		       *next_buflen, 1);
1317 #endif
1318 
1319 	return ret;
1320 }
1321 
/* submit ahash finup if it is the first job descriptor after update */
1323 static int ahash_finup_no_ctx(struct ahash_request *req)
1324 {
1325 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1326 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1327 	struct caam_hash_state *state = ahash_request_ctx(req);
1328 	struct device *jrdev = ctx->jrdev;
1329 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1330 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1331 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1332 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1333 	int last_buflen = state->current_buf ? state->buflen_0 :
1334 			  state->buflen_1;
1335 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1336 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1337 	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1338 	int digestsize = crypto_ahash_digestsize(ahash);
1339 	struct ahash_edesc *edesc;
1340 	bool chained = false;
1341 	int sh_len;
1342 	int ret = 0;
1343 
1344 	src_nents = __sg_count(req->src, req->nbytes, &chained);
1345 	sec4_sg_src_index = 2;
1346 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1347 			 sizeof(struct sec4_sg_entry);
1348 
1349 	/* allocate space for base edesc and hw desc commands, link tables */
1350 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1351 			sec4_sg_bytes, GFP_DMA | flags);
1352 	if (!edesc) {
1353 		dev_err(jrdev, "could not allocate extended descriptor\n");
1354 		return -ENOMEM;
1355 	}
1356 
1357 	sh_len = desc_len(sh_desc);
1358 	desc = edesc->hw_desc;
1359 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1360 
1361 	edesc->src_nents = src_nents;
1362 	edesc->chained = chained;
1363 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1364 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1365 			 DESC_JOB_IO_LEN;
1366 
1367 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1368 						state->buf_dma, buflen,
1369 						last_buflen);
1370 
1371 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1372 			   chained);
1373 
1374 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1375 					    sec4_sg_bytes, DMA_TO_DEVICE);
1376 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1377 		dev_err(jrdev, "unable to map S/G table\n");
1378 		return -ENOMEM;
1379 	}
1380 
1381 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1382 			       req->nbytes, LDST_SGF);
1383 
1384 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1385 						digestsize);
1386 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1387 		dev_err(jrdev, "unable to map dst\n");
1388 		return -ENOMEM;
1389 	}
1390 
1391 #ifdef DEBUG
1392 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1393 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1394 #endif
1395 
1396 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1397 	if (!ret) {
1398 		ret = -EINPROGRESS;
1399 	} else {
1400 		ahash_unmap(jrdev, edesc, req, digestsize);
1401 		kfree(edesc);
1402 	}
1403 
1404 	return ret;
1405 }
1406 
1407 /* submit first update job descriptor after init */
1408 static int ahash_update_first(struct ahash_request *req)
1409 {
1410 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1411 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1412 	struct caam_hash_state *state = ahash_request_ctx(req);
1413 	struct device *jrdev = ctx->jrdev;
1414 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1415 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ? &state->buflen_1 :
			   &state->buflen_0;
1419 	int to_hash;
1420 	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1421 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1422 	int sec4_sg_bytes, src_nents;
1423 	dma_addr_t src_dma;
1424 	u32 options;
1425 	struct ahash_edesc *edesc;
1426 	bool chained = false;
1427 	int ret = 0;
1428 	int sh_len;
1429 
1430 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1431 				      1);
1432 	to_hash = req->nbytes - *next_buflen;
1433 
1434 	if (to_hash) {
1435 		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1436 				     &chained);
1437 		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1438 				   DMA_TO_DEVICE, chained);
1439 		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1440 
1441 		/*
1442 		 * allocate space for base edesc and hw desc commands,
1443 		 * link tables
1444 		 */
1445 		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1446 				sec4_sg_bytes, GFP_DMA | flags);
1447 		if (!edesc) {
1448 			dev_err(jrdev,
1449 				"could not allocate extended descriptor\n");
1450 			return -ENOMEM;
1451 		}
1452 
1453 		edesc->src_nents = src_nents;
1454 		edesc->chained = chained;
1455 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1456 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1457 				 DESC_JOB_IO_LEN;
1458 		edesc->dst_dma = 0;
1459 
1460 		if (src_nents) {
1461 			sg_to_sec4_sg_last(req->src, src_nents,
1462 					   edesc->sec4_sg, 0);
1463 			edesc->sec4_sg_dma = dma_map_single(jrdev,
1464 							    edesc->sec4_sg,
1465 							    sec4_sg_bytes,
1466 							    DMA_TO_DEVICE);
1467 			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1468 				dev_err(jrdev, "unable to map S/G table\n");
1469 				return -ENOMEM;
1470 			}
1471 			src_dma = edesc->sec4_sg_dma;
1472 			options = LDST_SGF;
1473 		} else {
1474 			src_dma = sg_dma_address(req->src);
1475 			options = 0;
1476 		}
1477 
1478 		if (*next_buflen)
1479 			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1480 
1481 		sh_len = desc_len(sh_desc);
1482 		desc = edesc->hw_desc;
1483 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1484 				     HDR_REVERSE);
1485 
1486 		append_seq_in_ptr(desc, src_dma, to_hash, options);
1487 
1488 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1489 		if (ret)
1490 			return ret;
1491 
1492 #ifdef DEBUG
1493 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1494 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1495 			       desc_bytes(desc), 1);
1496 #endif
1497 
1498 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1499 				      req);
1500 		if (!ret) {
1501 			ret = -EINPROGRESS;
1502 			state->update = ahash_update_ctx;
1503 			state->finup = ahash_finup_ctx;
1504 			state->final = ahash_final_ctx;
1505 		} else {
1506 			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1507 					DMA_TO_DEVICE);
1508 			kfree(edesc);
1509 		}
1510 	} else if (*next_buflen) {
1511 		state->update = ahash_update_no_ctx;
1512 		state->finup = ahash_finup_no_ctx;
1513 		state->final = ahash_final_no_ctx;
1514 		sg_copy(next_buf, req->src, req->nbytes);
1515 	}
1516 #ifdef DEBUG
1517 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1518 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1519 		       *next_buflen, 1);
1520 #endif
1521 
1522 	return ret;
1523 }
1524 
1525 static int ahash_finup_first(struct ahash_request *req)
1526 {
1527 	return ahash_digest(req);
1528 }
1529 
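/*
 * Handler transitions: ahash_init() below arms the *_first/_no_ctx
 * handlers; once a job has produced a running context in
 * state->caam_ctx, ahash_update_first()/ahash_update_no_ctx() switch
 * the request over to the *_ctx handlers, which import that context.
 */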
1530 static int ahash_init(struct ahash_request *req)
1531 {
1532 	struct caam_hash_state *state = ahash_request_ctx(req);
1533 
1534 	state->update = ahash_update_first;
1535 	state->finup = ahash_finup_first;
1536 	state->final = ahash_final_no_ctx;
1537 
1538 	state->current_buf = 0;
1539 	state->buf_dma = 0;
1540 
1541 	return 0;
1542 }
1543 
1544 static int ahash_update(struct ahash_request *req)
1545 {
1546 	struct caam_hash_state *state = ahash_request_ctx(req);
1547 
1548 	return state->update(req);
1549 }
1550 
1551 static int ahash_finup(struct ahash_request *req)
1552 {
1553 	struct caam_hash_state *state = ahash_request_ctx(req);
1554 
1555 	return state->finup(req);
1556 }
1557 
1558 static int ahash_final(struct ahash_request *req)
1559 {
1560 	struct caam_hash_state *state = ahash_request_ctx(req);
1561 
1562 	return state->final(req);
1563 }
1564 
1565 static int ahash_export(struct ahash_request *req, void *out)
1566 {
1567 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1568 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1569 	struct caam_hash_state *state = ahash_request_ctx(req);
1570 
1571 	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1572 	memcpy(out + sizeof(struct caam_hash_ctx), state,
1573 	       sizeof(struct caam_hash_state));
1574 	return 0;
1575 }
1576 
1577 static int ahash_import(struct ahash_request *req, const void *in)
1578 {
1579 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1580 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1581 	struct caam_hash_state *state = ahash_request_ctx(req);
1582 
1583 	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1584 	memcpy(state, in + sizeof(struct caam_hash_ctx),
1585 	       sizeof(struct caam_hash_state));
1586 	return 0;
1587 }
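
/*
 * Illustrative sketch only, assuming a caller-provided blob of at least
 * sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state) bytes
 * (what ahash_export() above writes): suspending and resuming a hash
 * through the generic crypto API.
 */
static inline int example_export_import(struct ahash_request *req, void *blob)
{
	int ret;

	/* snapshot ctx + state into the caller's blob */
	ret = crypto_ahash_export(req, blob);
	if (ret)
		return ret;

	/* restore the snapshot into the (possibly reinitialized) request */
	return crypto_ahash_import(req, blob);
}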
1588 
1589 struct caam_hash_template {
1590 	char name[CRYPTO_MAX_ALG_NAME];
1591 	char driver_name[CRYPTO_MAX_ALG_NAME];
1592 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1593 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1594 	unsigned int blocksize;
1595 	struct ahash_alg template_ahash;
1596 	u32 alg_type;
1597 	u32 alg_op;
1598 };
1599 
1600 /* ahash descriptors */
1601 static struct caam_hash_template driver_hash[] = {
1602 	{
1603 		.name = "sha1",
1604 		.driver_name = "sha1-caam",
1605 		.hmac_name = "hmac(sha1)",
1606 		.hmac_driver_name = "hmac-sha1-caam",
1607 		.blocksize = SHA1_BLOCK_SIZE,
1608 		.template_ahash = {
1609 			.init = ahash_init,
1610 			.update = ahash_update,
1611 			.final = ahash_final,
1612 			.finup = ahash_finup,
1613 			.digest = ahash_digest,
1614 			.export = ahash_export,
1615 			.import = ahash_import,
1616 			.setkey = ahash_setkey,
1617 			.halg = {
1618 				.digestsize = SHA1_DIGEST_SIZE,
1619 				},
1620 			},
1621 		.alg_type = OP_ALG_ALGSEL_SHA1,
1622 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1623 	}, {
1624 		.name = "sha224",
1625 		.driver_name = "sha224-caam",
1626 		.hmac_name = "hmac(sha224)",
1627 		.hmac_driver_name = "hmac-sha224-caam",
1628 		.blocksize = SHA224_BLOCK_SIZE,
1629 		.template_ahash = {
1630 			.init = ahash_init,
1631 			.update = ahash_update,
1632 			.final = ahash_final,
1633 			.finup = ahash_finup,
1634 			.digest = ahash_digest,
1635 			.export = ahash_export,
1636 			.import = ahash_import,
1637 			.setkey = ahash_setkey,
1638 			.halg = {
1639 				.digestsize = SHA224_DIGEST_SIZE,
1640 				},
1641 			},
1642 		.alg_type = OP_ALG_ALGSEL_SHA224,
1643 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1644 	}, {
1645 		.name = "sha256",
1646 		.driver_name = "sha256-caam",
1647 		.hmac_name = "hmac(sha256)",
1648 		.hmac_driver_name = "hmac-sha256-caam",
1649 		.blocksize = SHA256_BLOCK_SIZE,
1650 		.template_ahash = {
1651 			.init = ahash_init,
1652 			.update = ahash_update,
1653 			.final = ahash_final,
1654 			.finup = ahash_finup,
1655 			.digest = ahash_digest,
1656 			.export = ahash_export,
1657 			.import = ahash_import,
1658 			.setkey = ahash_setkey,
1659 			.halg = {
1660 				.digestsize = SHA256_DIGEST_SIZE,
1661 				},
1662 			},
1663 		.alg_type = OP_ALG_ALGSEL_SHA256,
1664 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1665 	}, {
1666 		.name = "sha384",
1667 		.driver_name = "sha384-caam",
1668 		.hmac_name = "hmac(sha384)",
1669 		.hmac_driver_name = "hmac-sha384-caam",
1670 		.blocksize = SHA384_BLOCK_SIZE,
1671 		.template_ahash = {
1672 			.init = ahash_init,
1673 			.update = ahash_update,
1674 			.final = ahash_final,
1675 			.finup = ahash_finup,
1676 			.digest = ahash_digest,
1677 			.export = ahash_export,
1678 			.import = ahash_import,
1679 			.setkey = ahash_setkey,
1680 			.halg = {
1681 				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_hash_state),
1682 				},
1683 			},
1684 		.alg_type = OP_ALG_ALGSEL_SHA384,
1685 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1686 	}, {
1687 		.name = "sha512",
1688 		.driver_name = "sha512-caam",
1689 		.hmac_name = "hmac(sha512)",
1690 		.hmac_driver_name = "hmac-sha512-caam",
1691 		.blocksize = SHA512_BLOCK_SIZE,
1692 		.template_ahash = {
1693 			.init = ahash_init,
1694 			.update = ahash_update,
1695 			.final = ahash_final,
1696 			.finup = ahash_finup,
1697 			.digest = ahash_digest,
1698 			.export = ahash_export,
1699 			.import = ahash_import,
1700 			.setkey = ahash_setkey,
1701 			.halg = {
1702 				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_hash_state),
1703 				},
1704 			},
1705 		.alg_type = OP_ALG_ALGSEL_SHA512,
1706 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1707 	}, {
1708 		.name = "md5",
1709 		.driver_name = "md5-caam",
1710 		.hmac_name = "hmac(md5)",
1711 		.hmac_driver_name = "hmac-md5-caam",
1712 		.blocksize = MD5_BLOCK_WORDS * 4,
1713 		.template_ahash = {
1714 			.init = ahash_init,
1715 			.update = ahash_update,
1716 			.final = ahash_final,
1717 			.finup = ahash_finup,
1718 			.digest = ahash_digest,
1719 			.export = ahash_export,
1720 			.import = ahash_import,
1721 			.setkey = ahash_setkey,
1722 			.halg = {
1723 				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_hash_state),
1724 				},
1725 			},
1726 		.alg_type = OP_ALG_ALGSEL_MD5,
1727 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1728 	},
1729 };
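/*
 * Each entry above yields two registrations: an unkeyed hash
 * (name/driver_name) and its HMAC (hmac_name/hmac_driver_name), e.g.
 * "sha256"/"sha256-caam" and "hmac(sha256)"/"hmac-sha256-caam". A
 * sketch of how a user would pick one up (key and keylen are
 * placeholders); CAAM_CRA_PRIORITY makes the crypto core prefer this
 * driver over the generic software implementation:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ahash_setkey(tfm, key, keylen);
 */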
1730 
1731 struct caam_hash_alg {
1732 	struct list_head entry;
1733 	int alg_type;
1734 	int alg_op;
1735 	struct ahash_alg ahash_alg;
1736 };
1737 
1738 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1739 {
1740 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1741 	struct crypto_alg *base = tfm->__crt_alg;
1742 	struct hash_alg_common *halg =
1743 		 container_of(base, struct hash_alg_common, base);
1744 	struct ahash_alg *alg =
1745 		 container_of(halg, struct ahash_alg, halg);
1746 	struct caam_hash_alg *caam_hash =
1747 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1748 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1749 	/*
1750 	 * MDHA running digest + msg length sizes for MD5, SHA1, 224, 256,
1751 	 * 384, 512. SHA-224 and SHA-384 carry the full internal state of
1752 	 * their parent algorithm, hence the SHA-256/SHA-512 sizes below.
1753 	 */
1754 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1755 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1756 	int ret = 0;
1757 
1758 	/*
1759 	 * Get a job ring from the Job Ring driver so that all crypto
1760 	 * requests on this tfm are processed in order.
1761 	 */
1762 	ctx->jrdev = caam_jr_alloc();
1763 	if (IS_ERR(ctx->jrdev)) {
1764 		pr_err("Job Ring Device allocation for transform failed\n");
1765 		return PTR_ERR(ctx->jrdev);
1766 	}
1767 	/* copy descriptor header template value */
1768 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1769 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1770 
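	/*
	 * OP_ALG_ALGSEL_SUBMASK keeps only the low nibble of the ALGSEL
	 * field, so the MD5..SHA512 selectors index runninglen[] as 0..5,
	 * matching the table order above.
	 */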
1771 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1772 				  OP_ALG_ALGSEL_SHIFT];
1773 
1774 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1775 				 sizeof(struct caam_hash_state));
1776 
1777 	ret = ahash_set_sh_desc(ahash);
1778 
1779 	return ret;
1780 }
1781 
1782 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1783 {
1784 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1785 
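	/*
	 * The shared descriptors were DMA-mapped in ahash_set_sh_desc();
	 * unmap only those that were actually mapped successfully.
	 */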
1786 	if (ctx->sh_desc_update_dma &&
1787 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1788 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1789 				 desc_bytes(ctx->sh_desc_update),
1790 				 DMA_TO_DEVICE);
1791 	if (ctx->sh_desc_update_first_dma &&
1792 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1793 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1794 				 desc_bytes(ctx->sh_desc_update_first),
1795 				 DMA_TO_DEVICE);
1796 	if (ctx->sh_desc_fin_dma &&
1797 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1798 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1799 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1800 	if (ctx->sh_desc_digest_dma &&
1801 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1802 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1803 				 desc_bytes(ctx->sh_desc_digest),
1804 				 DMA_TO_DEVICE);
1805 	if (ctx->sh_desc_finup_dma &&
1806 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1807 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1808 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1809 
1810 	caam_jr_free(ctx->jrdev);
1811 }
1812 
1813 static void __exit caam_algapi_hash_exit(void)
1814 {
1815 	struct caam_hash_alg *t_alg, *n;
1816 
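	/* hash_list is only initialized once the init path finds a CAAM */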
1817 	if (!hash_list.next)
1818 		return;
1819 
1820 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1821 		crypto_unregister_ahash(&t_alg->ahash_alg);
1822 		list_del(&t_alg->entry);
1823 		kfree(t_alg);
1824 	}
1825 }
1826 
1827 static struct caam_hash_alg *
1828 caam_hash_alloc(struct caam_hash_template *template,
1829 		bool keyed)
1830 {
1831 	struct caam_hash_alg *t_alg;
1832 	struct ahash_alg *halg;
1833 	struct crypto_alg *alg;
1834 
1835 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1836 	if (!t_alg)
1837 		return ERR_PTR(-ENOMEM);
1840 
1841 	t_alg->ahash_alg = template->template_ahash;
1842 	halg = &t_alg->ahash_alg;
1843 	alg = &halg->halg.base;
1844 
1845 	if (keyed) {
1846 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1847 			 template->hmac_name);
1848 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1849 			 template->hmac_driver_name);
1850 	} else {
1851 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1852 			 template->name);
1853 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1854 			 template->driver_name);
1855 	}
1856 	alg->cra_module = THIS_MODULE;
1857 	alg->cra_init = caam_hash_cra_init;
1858 	alg->cra_exit = caam_hash_cra_exit;
1859 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1860 	alg->cra_priority = CAAM_CRA_PRIORITY;
1861 	alg->cra_blocksize = template->blocksize;
1862 	alg->cra_alignmask = 0;
1863 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1864 	alg->cra_type = &crypto_ahash_type;
1865 
1866 	t_alg->alg_type = template->alg_type;
1867 	t_alg->alg_op = template->alg_op;
1868 
1869 	return t_alg;
1870 }
1871 
1872 static int __init caam_algapi_hash_init(void)
1873 {
1874 	struct device_node *dev_node;
1875 	struct platform_device *pdev;
1876 	struct device *ctrldev;
1877 	void *priv;
1878 	int i = 0, err = 0;
1879 
1880 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1881 	if (!dev_node) {
1882 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1883 		if (!dev_node)
1884 			return -ENODEV;
1885 	}
1886 
1887 	pdev = of_find_device_by_node(dev_node);
1888 	if (!pdev) {
1889 		of_node_put(dev_node);
1890 		return -ENODEV;
1891 	}
1892 
1893 	ctrldev = &pdev->dev;
1894 	priv = dev_get_drvdata(ctrldev);
1895 	of_node_put(dev_node);
1896 
1897 	/*
1898 	 * If priv is NULL, it's probably because the caam driver wasn't
1899 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1900 	 */
1901 	if (!priv)
1902 		return -ENODEV;
1903 
1904 	INIT_LIST_HEAD(&hash_list);
1905 
1906 	/* register crypto algorithms the device supports */
1907 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1908 		/* TODO: check if h/w supports alg */
1909 		struct caam_hash_alg *t_alg;
1910 
1911 		/* register hmac version */
1912 		t_alg = caam_hash_alloc(&driver_hash[i], true);
1913 		if (IS_ERR(t_alg)) {
1914 			err = PTR_ERR(t_alg);
1915 			pr_warn("%s alg allocation failed\n",
1916 				driver_hash[i].driver_name);
1917 			continue;
1918 		}
1919 
1920 		err = crypto_register_ahash(&t_alg->ahash_alg);
1921 		if (err) {
1922 			pr_warn("%s alg registration failed\n",
1923 				t_alg->ahash_alg.halg.base.cra_driver_name);
1924 			kfree(t_alg);
1925 		} else {
1926 			list_add_tail(&t_alg->entry, &hash_list);
		}
1927 
1928 		/* register unkeyed version */
1929 		t_alg = caam_hash_alloc(&driver_hash[i], false);
1930 		if (IS_ERR(t_alg)) {
1931 			err = PTR_ERR(t_alg);
1932 			pr_warn("%s alg allocation failed\n",
1933 				driver_hash[i].driver_name);
1934 			continue;
1935 		}
1936 
1937 		err = crypto_register_ahash(&t_alg->ahash_alg);
1938 		if (err) {
1939 			pr_warn("%s alg registration failed\n",
1940 				t_alg->ahash_alg.halg.base.cra_driver_name);
1941 			kfree(t_alg);
1942 		} else {
1943 			list_add_tail(&t_alg->entry, &hash_list);
		}
1944 	}
1945 
1946 	return err;
1947 }
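/*
 * Minimal usage sketch (illustrative only, error handling elided;
 * data, len, my_done_cb and my_ctx are placeholders): a one-shot
 * digest through whichever "sha256" the crypto core resolves, which
 * is "sha256-caam" once this module has registered. An async driver
 * like this one may return -EINPROGRESS and signal the callback
 * rather than completing synchronously.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_ahash_digest(req);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */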
1948 
1949 module_init(caam_algapi_hash_init);
1950 module_exit(caam_algapi_hash_exit);
1951 
1952 MODULE_LICENSE("GPL");
1953 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1954 MODULE_AUTHOR("Freescale Semiconductor - NMG");
1955