/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
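
/*
 * A hedged sketch of how such a job descriptor is built with the
 * desc_constr.h helpers used throughout this file (the helper names
 * are real; the dma addresses and lengths are placeholders):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, options);
 */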

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
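
/*
 * Worked example (assuming 32-bit DMA addresses, so CAAM_CMD_SZ ==
 * CAAM_PTR_SZ == 4): DESC_JOB_IO_LEN = 4 * 5 + 4 * 3 = 32 bytes, i.e.
 * the header, the shared descriptor pointer, the SEQ_OUT_PTR and
 * SEQ_IN_PTR command words plus their address/length words, matching
 * the job descriptor layout sketched above.
 */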

#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
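
/*
 * buf_0/buf_1 are ping-ponged via current_buf: while one buffer's
 * partial block is fed to CAAM, the unhashed tail of the new request
 * is copied into the other. A sketch of the selection pattern used by
 * the update paths below:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 *	...
 *	state->current_buf = !state->current_buf;
 */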

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data. Note that
 * a previously used buffer may still be mapped and needs to be
 * unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
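
/*
 * Sketch of the shared-descriptor prefix built above (command flow
 * only; the jump and key commands are emitted only when a split key
 * has been set):
 *
 *	SHR HDR (share wait)
 *	JUMP -> past key if descriptor already shared
 *	KEY (class 2, MDHA split key, encrypted, immediate)
 *	LOAD (propagate errors from shared to job descriptor)
 */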

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first update (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
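
/*
 * Summary of the five shared descriptors built above (sketch; "(key)"
 * means the split key is appended only when one has been set):
 *
 *	sh_desc_update       - import ctx, class 2 AS_UPDATE, store ctx
 *	sh_desc_update_first - (key), AS_INIT, store ctx
 *	sh_desc_fin          - (key), import ctx, AS_FINALIZE, store digest
 *	sh_desc_finup        - (key), import ctx, AS_FINALIZE, store digest
 *	sh_desc_digest       - (key), AS_INITFINAL, store digest
 */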

static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, keylen,
			       ctx->alg_op);
}

/* Hash the key down to digestsize if it is longer than the block size */
static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		kfree(hashed_key);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
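
/*
 * Hedged consumer-side sketch (not part of this driver): a keyed hash
 * set up through the generic crypto API lands in ahash_setkey() above.
 * All calls below are standard crypto API:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_ahash_digest(req); (may return -EINPROGRESS when queued)
 */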

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
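
/*
 * Single-allocation layout used by all paths below (sketch): the job
 * descriptor and the link table live in one kmalloc'd block right
 * after the ahash_edesc bookkeeping:
 *
 *	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
 *			sec4_sg_bytes, GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 *			 DESC_JOB_IO_LEN;
 */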

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;
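
	/*
	 * Worked example of the split above, assuming a 64-byte block
	 * size: with 10 bytes buffered and a 100-byte request,
	 * in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64,
	 * so exactly one full block goes to CAAM and 46 bytes are
	 * carried over in next_buf.
	 */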

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					   DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last valid link-table entry, not a byte offset */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;
	edesc->sec4_sg_bytes = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				    req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			       req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ? &state->buflen_1 :
			   &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}
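
/*
 * State-machine sketch: ahash_init() points the request ops at the
 * "first" variants. After the first job that actually hashes data the
 * ops switch to the _ctx variants; if the first update only buffers
 * data, they switch to the _no_ctx variants instead:
 *
 *	init -> update_first -+-> update_ctx ... final_ctx / finup_ctx
 *	                      \-> update_no_ctx (all data still buffered)
 */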

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
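
/*
 * Hedged consumer-side sketch (not part of this driver): partial hash
 * state round-trips through the generic API, which lands in
 * ahash_export()/ahash_import() above. All calls below are standard
 * crypto API:
 *
 *	u8 st[sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state)];
 *
 *	crypto_ahash_update(req);
 *	crypto_ahash_export(req, st);
 *	...
 *	crypto_ahash_import(req2, st);
 *	crypto_ahash_final(req2);
 */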

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				},
			},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
1649 
struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * The truncated hashes (SHA-224/SHA-384) carry the full internal
	 * state of their parent algorithm (32/64 bytes), since truncation
	 * only happens when the final digest is produced.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
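	/*
	 * Example: tfm_count starts at -1 (set in caam_algapi_hash_init),
	 * so with total_jobrs == 2 successive transforms land on jrdev[0],
	 * jrdev[1], jrdev[0], ...; every request of a given tfm then stays
	 * on that one ring.
	 */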

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];
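	/*
	 * Indexing sketch, assuming the OP_ALG_ALGSEL_* encodings from
	 * desc.h (MD5 0x40, SHA1 0x41, ..., SHA512 0x45): the low nibble
	 * of the ALGSEL field maps MD5 to runninglen[0] and SHA512 to
	 * runninglen[5], matching the array order above.
	 */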

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	/* unmap only the shared descriptors that were mapped successfully */
	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	/* drop the node reference on every path, including !pdev */
	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/* nothing was registered if the controller never set up drvdata */
	if (!priv || !priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
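
/*
 * Each template above is instantiated twice; e.g. for sha256 this
 * registers cra_name "sha256" (driver "sha256-caam") for the unkeyed
 * hash and "hmac(sha256)" (driver "hmac-sha256-caam") for the keyed one.
 */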

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	/* drop the node reference on every path, including !pdev */
	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->hash_list);
		}
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
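
/*
 * Note: after a successful load on CAAM hardware, the algorithms
 * registered above appear in /proc/crypto under their "-caam" driver
 * names; which entries show up depends on which registrations succeeded.
 */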