xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision 343e44b1)
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
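/*
 * A minimal sketch of how the job descriptor layout above is produced with
 * the desc_constr.h helpers used throughout this file.  Illustrative only:
 * the CAAMHASH_DOC_EXAMPLES guard is a hypothetical documentation switch
 * that is never defined, so none of these examples are built.
 */
#ifdef CAAMHASH_DOC_EXAMPLES
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_len, dma_addr_t src_dma, u32 srclen,
				   dma_addr_t dst_dma, u32 dstlen)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, sh_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dstlen, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, srclen, 0);
}
#endif /* CAAMHASH_DOC_EXAMPLES */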

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
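
/*
 * Worked example of the sizing above, assuming CAAM_CMD_SZ is 4 (one
 * 32-bit descriptor word, per desc_constr.h): DESC_AHASH_FINAL_LEN =
 * (4 + 5) * 4 = 36 bytes and CAAM_MAX_HASH_KEY_SIZE = 2 * 64 = 128 bytes,
 * giving DESC_HASH_MAX_USED_BYTES = 164 and DESC_HASH_MAX_USED_LEN = 41
 * words, the size of each sh_desc_* buffer in struct caam_hash_ctx below.
 */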

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
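
/*
 * A short sketch (illustrative only, never built) of the buf_0/buf_1
 * ping-pong that caam_hash_state implements: current_buf selects the
 * buffer holding bytes still to be hashed, the other buffer collects the
 * left-over bytes of the current request, and the roles swap per update.
 */
#ifdef CAAMHASH_DOC_EXAMPLES
static void example_buf_pingpong(struct caam_hash_state *state)
{
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;

	(void)buf;
	(void)next_buf;
	/* once the job consuming 'buf' has been built, the roles swap */
	state->current_buf = !state->current_buf;
}
#endif /* CAAMHASH_DOC_EXAMPLES */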

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; the buffer
 * may have been used previously and, if so, first needs to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
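
/*
 * Reading init_sh_desc_key_ahash() above as a command trace, a keyed
 * (HMAC) shared descriptor starts roughly as follows (illustrative, not
 * byte-exact):
 *
 *	SHR HDR (serial sharing)
 *	JUMP    (skip the key load if this share already holds the key)
 *	KEY     (class 2 split key, encrypted, immediate)
 *	LOAD    (propagate shared-descriptor errors to the job descriptor)
 */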

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
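
/*
 * Usage sketch for the shared descriptors built above (illustrative only,
 * never built): once ahash_set_sh_desc() has mapped a shared descriptor,
 * a job descriptor merely points at it, exactly as ahash_update_ctx() and
 * friends do below.
 */
#ifdef CAAMHASH_DOC_EXAMPLES
static void example_job_from_shared(struct caam_hash_ctx *ctx, u32 *job_desc)
{
	int sh_len = desc_len(ctx->sh_desc_update);

	init_job_desc_shared(job_desc, ctx->sh_desc_update_dma, sh_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
}
#endif /* CAAMHASH_DOC_EXAMPLES */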

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, keylen,
			       ctx->alg_op);
}

/* Digest the key if it is too large (longer than the block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
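
/*
 * Worked example of the split key sizing above: for hmac(sha256) the
 * mdpadlen[] lookup yields 32, so split_key_len = 64 and
 * split_key_pad_len = ALIGN(64, 16) = 64; for hmac(md5) the values are
 * 32 and 32, and for hmac(sha512) they are 128 and 128, which is exactly
 * CAAM_MAX_HASH_KEY_SIZE and still fits in ctx->key.
 */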

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
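
/*
 * Allocation sketch for ahash_edesc (illustrative only, never built): the
 * link table is carved out of the same allocation, immediately after the
 * fixed part, which is what the sizeof(*edesc) + sec4_sg_bytes pattern
 * used throughout the functions below relies on.
 */
#ifdef CAAMHASH_DOC_EXAMPLES
static struct ahash_edesc *example_alloc_edesc(int sg_entries, gfp_t flags)
{
	int sec4_sg_bytes = sg_entries * sizeof(struct sec4_sg_entry);

	/* edesc->sec4_sg then points directly past the fixed members */
	return kzalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes,
		       GFP_DMA | flags);
}
#endif /* CAAMHASH_DOC_EXAMPLES */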

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
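
/*
 * The four completion callbacks above differ only in what they unmap:
 *
 *	ahash_done         - result only (digest, no hw context)
 *	ahash_done_bi      - context mapped bidirectionally (update_ctx)
 *	ahash_done_ctx_src - context was the source, result is the output
 *	ahash_done_ctx_dst - context is the output (first update)
 */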

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					   DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
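
/*
 * Worked example of the buffering arithmetic in ahash_update_ctx() above:
 * with a 64-byte block size, 10 bytes already buffered and a 100-byte
 * request, in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64,
 * so exactly one block is sent to the CAAM and the remaining 46 bytes are
 * copied into next_buf for a later update/final.
 */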

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
	if (src_nents > 1)
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
	else
		sec4_sg_bytes = 0;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
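
/*
 * ahash_digest() above shows the source-mapping pattern shared by the
 * no-context paths: a single-segment scatterlist is wired directly into
 * SEQ_IN_PTR, while multiple segments go through a sec4 link table and
 * the LDST_SGF option.
 */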

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc), GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			       req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		if (src_nents > 1)
			sec4_sg_bytes = src_nents *
					sizeof(struct sec4_sg_entry);
		else
			sec4_sg_bytes = 0;

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		if (src_nents > 1) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
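
/*
 * Round-trip sketch for the export/import pair above (illustrative only,
 * never built); crypto_ahash_export()/crypto_ahash_import() are the
 * generic crypto API entry points that land in ahash_export() and
 * ahash_import().
 */
#ifdef CAAMHASH_DOC_EXAMPLES
static int example_export_import(struct ahash_request *req)
{
	struct caam_export_state st;
	int ret;

	ret = crypto_ahash_export(req, &st);
	if (ret)
		return ret;
	/* the request could be reinitialized or even freed here */
	return crypto_ahash_import(req, &st);
}
#endif /* CAAMHASH_DOC_EXAMPLES */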
1619 
1620 struct caam_hash_template {
1621 	char name[CRYPTO_MAX_ALG_NAME];
1622 	char driver_name[CRYPTO_MAX_ALG_NAME];
1623 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1624 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1625 	unsigned int blocksize;
1626 	struct ahash_alg template_ahash;
1627 	u32 alg_type;
1628 	u32 alg_op;
1629 };
1630 
1631 /* ahash descriptors */
1632 static struct caam_hash_template driver_hash[] = {
1633 	{
1634 		.name = "sha1",
1635 		.driver_name = "sha1-caam",
1636 		.hmac_name = "hmac(sha1)",
1637 		.hmac_driver_name = "hmac-sha1-caam",
1638 		.blocksize = SHA1_BLOCK_SIZE,
1639 		.template_ahash = {
1640 			.init = ahash_init,
1641 			.update = ahash_update,
1642 			.final = ahash_final,
1643 			.finup = ahash_finup,
1644 			.digest = ahash_digest,
1645 			.export = ahash_export,
1646 			.import = ahash_import,
1647 			.setkey = ahash_setkey,
1648 			.halg = {
1649 				.digestsize = SHA1_DIGEST_SIZE,
1650 				.statesize = sizeof(struct caam_export_state),
1651 			},
1652 		},
1653 		.alg_type = OP_ALG_ALGSEL_SHA1,
1654 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1655 	}, {
1656 		.name = "sha224",
1657 		.driver_name = "sha224-caam",
1658 		.hmac_name = "hmac(sha224)",
1659 		.hmac_driver_name = "hmac-sha224-caam",
1660 		.blocksize = SHA224_BLOCK_SIZE,
1661 		.template_ahash = {
1662 			.init = ahash_init,
1663 			.update = ahash_update,
1664 			.final = ahash_final,
1665 			.finup = ahash_finup,
1666 			.digest = ahash_digest,
1667 			.export = ahash_export,
1668 			.import = ahash_import,
1669 			.setkey = ahash_setkey,
1670 			.halg = {
1671 				.digestsize = SHA224_DIGEST_SIZE,
1672 				.statesize = sizeof(struct caam_export_state),
1673 			},
1674 		},
1675 		.alg_type = OP_ALG_ALGSEL_SHA224,
1676 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1677 	}, {
1678 		.name = "sha256",
1679 		.driver_name = "sha256-caam",
1680 		.hmac_name = "hmac(sha256)",
1681 		.hmac_driver_name = "hmac-sha256-caam",
1682 		.blocksize = SHA256_BLOCK_SIZE,
1683 		.template_ahash = {
1684 			.init = ahash_init,
1685 			.update = ahash_update,
1686 			.final = ahash_final,
1687 			.finup = ahash_finup,
1688 			.digest = ahash_digest,
1689 			.export = ahash_export,
1690 			.import = ahash_import,
1691 			.setkey = ahash_setkey,
1692 			.halg = {
1693 				.digestsize = SHA256_DIGEST_SIZE,
1694 				.statesize = sizeof(struct caam_export_state),
1695 			},
1696 		},
1697 		.alg_type = OP_ALG_ALGSEL_SHA256,
1698 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1699 	}, {
1700 		.name = "sha384",
1701 		.driver_name = "sha384-caam",
1702 		.hmac_name = "hmac(sha384)",
1703 		.hmac_driver_name = "hmac-sha384-caam",
1704 		.blocksize = SHA384_BLOCK_SIZE,
1705 		.template_ahash = {
1706 			.init = ahash_init,
1707 			.update = ahash_update,
1708 			.final = ahash_final,
1709 			.finup = ahash_finup,
1710 			.digest = ahash_digest,
1711 			.export = ahash_export,
1712 			.import = ahash_import,
1713 			.setkey = ahash_setkey,
1714 			.halg = {
1715 				.digestsize = SHA384_DIGEST_SIZE,
1716 				.statesize = sizeof(struct caam_export_state),
1717 			},
1718 		},
1719 		.alg_type = OP_ALG_ALGSEL_SHA384,
1720 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1721 	}, {
1722 		.name = "sha512",
1723 		.driver_name = "sha512-caam",
1724 		.hmac_name = "hmac(sha512)",
1725 		.hmac_driver_name = "hmac-sha512-caam",
1726 		.blocksize = SHA512_BLOCK_SIZE,
1727 		.template_ahash = {
1728 			.init = ahash_init,
1729 			.update = ahash_update,
1730 			.final = ahash_final,
1731 			.finup = ahash_finup,
1732 			.digest = ahash_digest,
1733 			.export = ahash_export,
1734 			.import = ahash_import,
1735 			.setkey = ahash_setkey,
1736 			.halg = {
1737 				.digestsize = SHA512_DIGEST_SIZE,
1738 				.statesize = sizeof(struct caam_export_state),
1739 			},
1740 		},
1741 		.alg_type = OP_ALG_ALGSEL_SHA512,
1742 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1743 	}, {
1744 		.name = "md5",
1745 		.driver_name = "md5-caam",
1746 		.hmac_name = "hmac(md5)",
1747 		.hmac_driver_name = "hmac-md5-caam",
1748 		.blocksize = MD5_BLOCK_WORDS * 4,	/* 64 bytes */
1749 		.template_ahash = {
1750 			.init = ahash_init,
1751 			.update = ahash_update,
1752 			.final = ahash_final,
1753 			.finup = ahash_finup,
1754 			.digest = ahash_digest,
1755 			.export = ahash_export,
1756 			.import = ahash_import,
1757 			.setkey = ahash_setkey,
1758 			.halg = {
1759 				.digestsize = MD5_DIGEST_SIZE,
1760 				.statesize = sizeof(struct caam_export_state),
1761 			},
1762 		},
1763 		.alg_type = OP_ALG_ALGSEL_MD5,
1764 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1765 	},
1766 };
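
/*
 * A minimal, hypothetical user of one of the hashes above through the
 * generic crypto API (sgl, digest and nbytes stand for the caller's
 * scatterlist, output buffer and length; error handling and async
 * completion are elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_ahash_digest(req);	(reaches ahash_digest())
 */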
1767 
1768 struct caam_hash_alg {
1769 	struct list_head entry;
1770 	int alg_type;
1771 	int alg_op;
1772 	struct ahash_alg ahash_alg;
1773 };
1774 
1775 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1776 {
1777 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1778 	struct crypto_alg *base = tfm->__crt_alg;
1779 	struct hash_alg_common *halg =
1780 		 container_of(base, struct hash_alg_common, base);
1781 	struct ahash_alg *alg =
1782 		 container_of(halg, struct ahash_alg, halg);
1783 	struct caam_hash_alg *caam_hash =
1784 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1785 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1786 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * The truncated variants (SHA-224/SHA-384) still carry the full
	 * SHA-256/SHA-512 internal state, hence the 32- and 64-byte entries
	 * below rather than their (smaller) digest sizes.
	 */
1787 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1788 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1789 					 HASH_MSG_LEN + 32,
1790 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1791 					 HASH_MSG_LEN + 64,
1792 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1794 
1795 	/*
1796 	 * Get a Job ring from Job Ring driver to ensure in-order
1797 	 * crypto request processing per tfm
1798 	 */
1799 	ctx->jrdev = caam_jr_alloc();
1800 	if (IS_ERR(ctx->jrdev)) {
1801 		pr_err("Job Ring Device allocation for transform failed\n");
1802 		return PTR_ERR(ctx->jrdev);
1803 	}
1804 	/* copy descriptor header template value */
1805 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1806 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1807 
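	/*
	 * The low bits of the ALGSEL field run 0..5 for MD5, SHA1, SHA224,
	 * SHA256, SHA384, SHA512, matching the runninglen[] ordering above.
	 */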
1808 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1809 				  OP_ALG_ALGSEL_SHIFT];
1810 
1811 	/* have each ahash_request carry a caam_hash_state (ahash_request_ctx) */
	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1813 
1814 	return ahash_set_sh_desc(ahash);
1817 }
1818 
1819 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1820 {
1821 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1822 
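	/* unmap whichever shared descriptors were successfully DMA-mapped */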
1823 	if (ctx->sh_desc_update_dma &&
1824 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1825 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1826 				 desc_bytes(ctx->sh_desc_update),
1827 				 DMA_TO_DEVICE);
1828 	if (ctx->sh_desc_update_first_dma &&
1829 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1830 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1831 				 desc_bytes(ctx->sh_desc_update_first),
1832 				 DMA_TO_DEVICE);
1833 	if (ctx->sh_desc_fin_dma &&
1834 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1835 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1836 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1837 	if (ctx->sh_desc_digest_dma &&
1838 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1839 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1840 				 desc_bytes(ctx->sh_desc_digest),
1841 				 DMA_TO_DEVICE);
1842 	if (ctx->sh_desc_finup_dma &&
1843 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1844 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1845 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1846 
1847 	caam_jr_free(ctx->jrdev);
1848 }
1849 
1850 static void __exit caam_algapi_hash_exit(void)
1851 {
1852 	struct caam_hash_alg *t_alg, *n;
1853 
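	/* defensive: init may have bailed out before INIT_LIST_HEAD() */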
1854 	if (!hash_list.next)
1855 		return;
1856 
1857 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1858 		crypto_unregister_ahash(&t_alg->ahash_alg);
1859 		list_del(&t_alg->entry);
1860 		kfree(t_alg);
1861 	}
1862 }
1863 
1864 static struct caam_hash_alg *
1865 caam_hash_alloc(struct caam_hash_template *template,
1866 		bool keyed)
1867 {
1868 	struct caam_hash_alg *t_alg;
1869 	struct ahash_alg *halg;
1870 	struct crypto_alg *alg;
1871 
1872 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1873 	if (!t_alg)
1875 		return ERR_PTR(-ENOMEM);
1877 
1878 	t_alg->ahash_alg = template->template_ahash;
1879 	halg = &t_alg->ahash_alg;
1880 	alg = &halg->halg.base;
1881 
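	/*
	 * Each template is registered twice: the hmac(<hash>) variant keeps
	 * setkey, while the bare hash has it removed below.
	 */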
1882 	if (keyed) {
1883 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1884 			 template->hmac_name);
1885 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1886 			 template->hmac_driver_name);
1887 	} else {
1888 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1889 			 template->name);
1890 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1891 			 template->driver_name);
1892 		t_alg->ahash_alg.setkey = NULL;
1893 	}
1894 	alg->cra_module = THIS_MODULE;
1895 	alg->cra_init = caam_hash_cra_init;
1896 	alg->cra_exit = caam_hash_cra_exit;
1897 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1898 	alg->cra_priority = CAAM_CRA_PRIORITY;
1899 	alg->cra_blocksize = template->blocksize;
1900 	alg->cra_alignmask = 0;
1901 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1902 	alg->cra_type = &crypto_ahash_type;
1903 
1904 	t_alg->alg_type = template->alg_type;
1905 	t_alg->alg_op = template->alg_op;
1906 
1907 	return t_alg;
1908 }
1909 
1910 static int __init caam_algapi_hash_init(void)
1911 {
1912 	struct device_node *dev_node;
1913 	struct platform_device *pdev;
1914 	struct device *ctrldev;
1915 	int i, err = 0;
1916 	struct caam_drv_private *priv;
1917 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1918 	u32 cha_inst, cha_vid;
1919 
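	/* accept both the current and the legacy SEC4 compatible string */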
1920 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1921 	if (!dev_node) {
1922 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1923 		if (!dev_node)
1924 			return -ENODEV;
1925 	}
1926 
1927 	pdev = of_find_device_by_node(dev_node);
1928 	if (!pdev) {
1929 		of_node_put(dev_node);
1930 		return -ENODEV;
1931 	}
1932 
1933 	ctrldev = &pdev->dev;
1934 	priv = dev_get_drvdata(ctrldev);
1935 	of_node_put(dev_node);
1936 
1937 	/*
1938 	 * If priv is NULL, it's probably because the caam driver wasn't
1939 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1940 	 */
1941 	if (!priv)
1942 		return -ENODEV;
1943 
1944 	/*
1945 	 * Register crypto algorithms the device supports.  First, identify
1946 	 * presence and attributes of MD block.
1947 	 */
1948 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1949 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1950 
1951 	/*
1952 	 * Skip registration of any hashing algorithms if MD block
1953 	 * is not present.
1954 	 */
1955 	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
1956 		return -ENODEV;
1957 
1958 	/* a low-power MDHA (LP256) restricts digests to SHA-256 and below */
1959 	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1960 		md_limit = SHA256_DIGEST_SIZE;
1961 
1962 	INIT_LIST_HEAD(&hash_list);
1963 
1964 	/* register crypto algorithms the device supports */
1965 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1966 		struct caam_hash_alg *t_alg;
1967 		struct caam_hash_template *alg = driver_hash + i;
1968 
1969 		/* If MD size is not supported by device, skip registration */
1970 		if (alg->template_ahash.halg.digestsize > md_limit)
1971 			continue;
1972 
1973 		/* register hmac version */
1974 		t_alg = caam_hash_alloc(alg, true);
1975 		if (IS_ERR(t_alg)) {
1976 			err = PTR_ERR(t_alg);
1977 			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
1978 			continue;
1979 		}
1980 
1981 		err = crypto_register_ahash(&t_alg->ahash_alg);
1982 		if (err) {
1983 			pr_warn("%s alg registration failed: %d\n",
1984 				t_alg->ahash_alg.halg.base.cra_driver_name,
1985 				err);
1986 			kfree(t_alg);
1987 		} else {
1988 			list_add_tail(&t_alg->entry, &hash_list);
		}
1989 
1990 		/* register unkeyed version */
1991 		t_alg = caam_hash_alloc(alg, false);
1992 		if (IS_ERR(t_alg)) {
1993 			err = PTR_ERR(t_alg);
1994 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1995 			continue;
1996 		}
1997 
1998 		err = crypto_register_ahash(&t_alg->ahash_alg);
1999 		if (err) {
2000 			pr_warn("%s alg registration failed: %d\n",
2001 				t_alg->ahash_alg.halg.base.cra_driver_name,
2002 				err);
2003 			kfree(t_alg);
2004 		} else {
2005 			list_add_tail(&t_alg->entry, &hash_list);
		}
2006 	}
2007 
2008 	return err;
2009 }
2010 
2011 module_init(caam_algapi_hash_init);
2012 module_exit(caam_algapi_hash_exit);
2013 
2014 MODULE_LICENSE("GPL");
2015 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2016 MODULE_AUTHOR("Freescale Semiconductor - NMG");
2017