xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision 281669df)
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
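
/*
 * Illustrative sketch (comment only, not compiled): building such a job
 * descriptor with this driver's own helpers. sh_desc/sh_desc_dma, src_dma,
 * dst_dma and len stand for values the caller is assumed to have already
 * prepared and DMA-mapped.
 *
 *	u32 desc[DESC_JOB_IO_LEN / sizeof(u32)];
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, len, 0);
 *	append_seq_in_ptr(desc, src_dma, len, 0);
 */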

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
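
/*
 * Worked example: for SHA-256 the running digest is 32 bytes, so the
 * context kept in state->caam_ctx is 8 + 32 = 40 bytes; MAX_CTX_LEN
 * (8 + 64 = 72) covers the largest case, SHA-512.
 */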

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
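
/*
 * Note on the two buffers above: buf_0/buf_1 form a ping-pong pair.
 * current_buf selects the buffer holding the bytes carried over from the
 * previous update; the other buffer receives the tail of the current
 * request, and the update paths below flip current_buf after each
 * submitted job.
 */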

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Only put the buffer in the link table if it contains data; the buffer
 * may have been used on a previous request, in which case it first
 * needs to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
			  ctx->adata.keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
			  KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->adata.keylen) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}
}

/*
 * For ahash, read data from seqin following state->caam_ctx, and write
 * the resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
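
/*
 * The three appends above produce, in order: a MATH command that derives
 * VARSEQINLEN from the remaining sequence input length, a variable-length
 * FIFO LOAD of the message bytes into the class 2 CHA, and a SEQ STORE
 * that writes digestsize bytes of class 2 context to the output pointer.
 */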

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
			LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first update (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->adata.keylen)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
			LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->adata.algtype, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->adata.algtype,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->adata.algtype,
			  OP_ALG_AS_INITFINAL, digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
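
/*
 * Summary of the four shared descriptors built above and the state they
 * consume/produce:
 *
 *	sh_desc_update		AS_UPDATE:	ctx + data -> ctx
 *	sh_desc_update_first	AS_INIT:	data       -> ctx
 *	sh_desc_fin		AS_FINALIZE:	ctx + data -> digest
 *	sh_desc_digest		AS_INITFINAL:	data       -> digest
 */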

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key_in, keylen);
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
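
/*
 * Example flow (illustrative): setting a 100-byte key on hmac(sha256).
 * Since 100 exceeds SHA256_BLOCK_SIZE (64), ahash_setkey() below first
 * calls hash_digest_key() to replace the key with its 32-byte digest,
 * and only then generates the split key.
 */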

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->adata.keylen = mdpadlen[(ctx->adata.algtype &
				      OP_ALG_ALGSEL_SUBMASK) >>
				     OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->adata.keylen_pad = ALIGN(ctx->adata.keylen, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->adata.keylen, ctx->adata.keylen_pad);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
				 DMA_TO_DEVICE);
	}

 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
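
/*
 * Worked example for the split key sizing above: hmac(sha1) picks the
 * mdpadlen entry 20, so adata.keylen = 40 and adata.keylen_pad =
 * ALIGN(40, 16) = 48; hmac(sha256) picks 32, giving keylen = 64 and
 * keylen_pad = 64.
 */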

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
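
/*
 * Note on the header flags above (descriptor-header bits from desc.h):
 * HDR_REVERSE makes the job descriptor commands run before the referenced
 * shared descriptor, and HDR_SHARE_DEFER defers the descriptor-sharing
 * decision to the shared descriptor's own header.
 */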

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
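
/*
 * Illustrative call (not compiled): hashing a request whose source
 * scatterlist was already DMA-mapped into mapped_nents segments, with no
 * leading link-table entries reserved:
 *
 *	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
 *				  req->nbytes);
 *
 * With nents == 1 and first_sg == 0 this appends a direct SEQ IN PTR;
 * otherwise it builds and maps a sec4 link table and sets LDST_SGF.
 */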

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		desc = edesc->hw_desc;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
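
/*
 * Buffering example for the update path above (illustrative): with a
 * 64-byte block size, 10 bytes already buffered and a 100-byte request,
 * in_len = 110, so next_buflen = 110 & 63 = 46 and to_hash = 64. One
 * block is hashed and the trailing 46 bytes of req->src are copied into
 * the spare buffer for the next call.
 */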

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
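
/*
 * Typical export/import round trip (illustrative): a caller saves a
 * partial hash with ahash_export() into a struct caam_export_state and
 * later resumes it, possibly on a different request, with ahash_import().
 * Only software state travels (the pending buffer, the running context
 * and the state function pointers); no DMA mappings are exported.
 */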
1647 
1648 struct caam_hash_template {
1649 	char name[CRYPTO_MAX_ALG_NAME];
1650 	char driver_name[CRYPTO_MAX_ALG_NAME];
1651 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1652 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1653 	unsigned int blocksize;
1654 	struct ahash_alg template_ahash;
1655 	u32 alg_type;
1656 };
1657 
1658 /* ahash descriptors */
1659 static struct caam_hash_template driver_hash[] = {
1660 	{
1661 		.name = "sha1",
1662 		.driver_name = "sha1-caam",
1663 		.hmac_name = "hmac(sha1)",
1664 		.hmac_driver_name = "hmac-sha1-caam",
1665 		.blocksize = SHA1_BLOCK_SIZE,
1666 		.template_ahash = {
1667 			.init = ahash_init,
1668 			.update = ahash_update,
1669 			.final = ahash_final,
1670 			.finup = ahash_finup,
1671 			.digest = ahash_digest,
1672 			.export = ahash_export,
1673 			.import = ahash_import,
1674 			.setkey = ahash_setkey,
1675 			.halg = {
1676 				.digestsize = SHA1_DIGEST_SIZE,
1677 				.statesize = sizeof(struct caam_export_state),
1678 			},
1679 		},
1680 		.alg_type = OP_ALG_ALGSEL_SHA1,
1681 	}, {
1682 		.name = "sha224",
1683 		.driver_name = "sha224-caam",
1684 		.hmac_name = "hmac(sha224)",
1685 		.hmac_driver_name = "hmac-sha224-caam",
1686 		.blocksize = SHA224_BLOCK_SIZE,
1687 		.template_ahash = {
1688 			.init = ahash_init,
1689 			.update = ahash_update,
1690 			.final = ahash_final,
1691 			.finup = ahash_finup,
1692 			.digest = ahash_digest,
1693 			.export = ahash_export,
1694 			.import = ahash_import,
1695 			.setkey = ahash_setkey,
1696 			.halg = {
1697 				.digestsize = SHA224_DIGEST_SIZE,
1698 				.statesize = sizeof(struct caam_export_state),
1699 			},
1700 		},
1701 		.alg_type = OP_ALG_ALGSEL_SHA224,
1702 	}, {
1703 		.name = "sha256",
1704 		.driver_name = "sha256-caam",
1705 		.hmac_name = "hmac(sha256)",
1706 		.hmac_driver_name = "hmac-sha256-caam",
1707 		.blocksize = SHA256_BLOCK_SIZE,
1708 		.template_ahash = {
1709 			.init = ahash_init,
1710 			.update = ahash_update,
1711 			.final = ahash_final,
1712 			.finup = ahash_finup,
1713 			.digest = ahash_digest,
1714 			.export = ahash_export,
1715 			.import = ahash_import,
1716 			.setkey = ahash_setkey,
1717 			.halg = {
1718 				.digestsize = SHA256_DIGEST_SIZE,
1719 				.statesize = sizeof(struct caam_export_state),
1720 			},
1721 		},
1722 		.alg_type = OP_ALG_ALGSEL_SHA256,
1723 	}, {
1724 		.name = "sha384",
1725 		.driver_name = "sha384-caam",
1726 		.hmac_name = "hmac(sha384)",
1727 		.hmac_driver_name = "hmac-sha384-caam",
1728 		.blocksize = SHA384_BLOCK_SIZE,
1729 		.template_ahash = {
1730 			.init = ahash_init,
1731 			.update = ahash_update,
1732 			.final = ahash_final,
1733 			.finup = ahash_finup,
1734 			.digest = ahash_digest,
1735 			.export = ahash_export,
1736 			.import = ahash_import,
1737 			.setkey = ahash_setkey,
1738 			.halg = {
1739 				.digestsize = SHA384_DIGEST_SIZE,
1740 				.statesize = sizeof(struct caam_export_state),
1741 			},
1742 		},
1743 		.alg_type = OP_ALG_ALGSEL_SHA384,
1744 	}, {
1745 		.name = "sha512",
1746 		.driver_name = "sha512-caam",
1747 		.hmac_name = "hmac(sha512)",
1748 		.hmac_driver_name = "hmac-sha512-caam",
1749 		.blocksize = SHA512_BLOCK_SIZE,
1750 		.template_ahash = {
1751 			.init = ahash_init,
1752 			.update = ahash_update,
1753 			.final = ahash_final,
1754 			.finup = ahash_finup,
1755 			.digest = ahash_digest,
1756 			.export = ahash_export,
1757 			.import = ahash_import,
1758 			.setkey = ahash_setkey,
1759 			.halg = {
1760 				.digestsize = SHA512_DIGEST_SIZE,
1761 				.statesize = sizeof(struct caam_export_state),
1762 			},
1763 		},
1764 		.alg_type = OP_ALG_ALGSEL_SHA512,
1765 	}, {
1766 		.name = "md5",
1767 		.driver_name = "md5-caam",
1768 		.hmac_name = "hmac(md5)",
1769 		.hmac_driver_name = "hmac-md5-caam",
1770 		.blocksize = MD5_BLOCK_WORDS * 4,	/* 16 words * 4 = 64 bytes */
1771 		.template_ahash = {
1772 			.init = ahash_init,
1773 			.update = ahash_update,
1774 			.final = ahash_final,
1775 			.finup = ahash_finup,
1776 			.digest = ahash_digest,
1777 			.export = ahash_export,
1778 			.import = ahash_import,
1779 			.setkey = ahash_setkey,
1780 			.halg = {
1781 				.digestsize = MD5_DIGEST_SIZE,
1782 				.statesize = sizeof(struct caam_export_state),
1783 			},
1784 		},
1785 		.alg_type = OP_ALG_ALGSEL_MD5,
1786 	},
1787 };
1788 
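/**
 * struct caam_hash_alg - one registered algorithm instance
 * @entry: node linking the instance into hash_list for cleanup
 * @alg_type: OP_ALG_ALGSEL_* value copied from the template
 * @ahash_alg: the ahash algorithm registered with the crypto API
 */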
1789 struct caam_hash_alg {
1790 	struct list_head entry;
1791 	int alg_type;
1792 	struct ahash_alg ahash_alg;
1793 };
1794 
1795 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1796 {
1797 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1798 	struct crypto_alg *base = tfm->__crt_alg;
1799 	struct hash_alg_common *halg =
1800 		 container_of(base, struct hash_alg_common, base);
1801 	struct ahash_alg *alg =
1802 		 container_of(halg, struct ahash_alg, halg);
1803 	struct caam_hash_alg *caam_hash =
1804 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1805 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1806 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncated hashes, so their running state
	 * is the full SHA-256 (32 byte) / SHA-512 (64 byte) internal
	 * digest; hence the raw sizes in those two slots.
	 */
1807 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1808 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1809 					 HASH_MSG_LEN + 32,
1810 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1811 					 HASH_MSG_LEN + 64,
1812 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1813 
1814 	/*
1815 	 * Get a Job ring from Job Ring driver to ensure in-order
1816 	 * crypto request processing per tfm
1817 	 */
1818 	ctx->jrdev = caam_jr_alloc();
1819 	if (IS_ERR(ctx->jrdev)) {
1820 		pr_err("Job Ring Device allocation for transform failed\n");
1821 		return PTR_ERR(ctx->jrdev);
1822 	}
1823 	/* copy descriptor header template value */
1824 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1825 
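	/* MD5..SHA-512 ALGSEL sub-ids are consecutive and index runninglen[] */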
1826 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
1827 				   OP_ALG_ALGSEL_SUBMASK) >>
1828 				  OP_ALG_ALGSEL_SHIFT];
1829 
1830 	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1832 	return ahash_set_sh_desc(ahash);
1833 }
1834 
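/*
 * Undo caam_hash_cra_init(): unmap whichever shared descriptors were
 * DMA-mapped and release the job ring held by this tfm.
 */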
1835 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1836 {
1837 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1838 
1839 	if (ctx->sh_desc_update_dma &&
1840 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1841 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1842 				 desc_bytes(ctx->sh_desc_update),
1843 				 DMA_TO_DEVICE);
1844 	if (ctx->sh_desc_update_first_dma &&
1845 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1846 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1847 				 desc_bytes(ctx->sh_desc_update_first),
1848 				 DMA_TO_DEVICE);
1849 	if (ctx->sh_desc_fin_dma &&
1850 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1851 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1852 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1853 	if (ctx->sh_desc_digest_dma &&
1854 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1855 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1856 				 desc_bytes(ctx->sh_desc_digest),
1857 				 DMA_TO_DEVICE);
1858 
1859 	caam_jr_free(ctx->jrdev);
1860 }
1861 
1862 static void __exit caam_algapi_hash_exit(void)
1863 {
1864 	struct caam_hash_alg *t_alg, *n;
1865 
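	/* hash_list is untouched if caam_algapi_hash_init() bailed early */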
1866 	if (!hash_list.next)
1867 		return;
1868 
1869 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1870 		crypto_unregister_ahash(&t_alg->ahash_alg);
1871 		list_del(&t_alg->entry);
1872 		kfree(t_alg);
1873 	}
1874 }
1875 
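/**
 * caam_hash_alloc - build an ahash_alg instance from a template
 * @template: entry of driver_hash[] to instantiate
 * @keyed: true for the hmac(...) variant, false for the unkeyed hash
 *	   (in which case ->setkey is cleared)
 *
 * Return: new instance on success, ERR_PTR(-ENOMEM) on allocation failure.
 */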
1876 static struct caam_hash_alg *
1877 caam_hash_alloc(struct caam_hash_template *template,
1878 		bool keyed)
1879 {
1880 	struct caam_hash_alg *t_alg;
1881 	struct ahash_alg *halg;
1882 	struct crypto_alg *alg;
1883 
1884 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1885 	if (!t_alg) {
1886 		pr_err("failed to allocate t_alg\n");
1887 		return ERR_PTR(-ENOMEM);
1888 	}
1889 
1890 	t_alg->ahash_alg = template->template_ahash;
1891 	halg = &t_alg->ahash_alg;
1892 	alg = &halg->halg.base;
1893 
1894 	if (keyed) {
1895 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1896 			 template->hmac_name);
1897 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1898 			 template->hmac_driver_name);
1899 	} else {
1900 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1901 			 template->name);
1902 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1903 			 template->driver_name);
1904 		t_alg->ahash_alg.setkey = NULL;
1905 	}
1906 	alg->cra_module = THIS_MODULE;
1907 	alg->cra_init = caam_hash_cra_init;
1908 	alg->cra_exit = caam_hash_cra_exit;
1909 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1910 	alg->cra_priority = CAAM_CRA_PRIORITY;
1911 	alg->cra_blocksize = template->blocksize;
1912 	alg->cra_alignmask = 0;
1913 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1914 	alg->cra_type = &crypto_ahash_type;
1915 
1916 	t_alg->alg_type = template->alg_type;
1917 
1918 	return t_alg;
1919 }
1920 
1921 static int __init caam_algapi_hash_init(void)
1922 {
1923 	struct device_node *dev_node;
1924 	struct platform_device *pdev;
1925 	struct device *ctrldev;
1926 	int i, err = 0;
1927 	struct caam_drv_private *priv;
1928 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1929 	u32 cha_inst, cha_vid;
1930 
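	/* current DTs use "fsl,sec-v4.0"; fall back to legacy "fsl,sec4.0" */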
1931 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1932 	if (!dev_node) {
1933 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1934 		if (!dev_node)
1935 			return -ENODEV;
1936 	}
1937 
1938 	pdev = of_find_device_by_node(dev_node);
1939 	if (!pdev) {
1940 		of_node_put(dev_node);
1941 		return -ENODEV;
1942 	}
1943 
1944 	ctrldev = &pdev->dev;
1945 	priv = dev_get_drvdata(ctrldev);
1946 	of_node_put(dev_node);
1947 
1948 	/*
1949 	 * If priv is NULL, it's probably because the caam driver wasn't
1950 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1951 	 */
1952 	if (!priv)
1953 		return -ENODEV;
1954 
1955 	/*
1956 	 * Register crypto algorithms the device supports.  First, identify
1957 	 * presence and attributes of MD block.
1958 	 */
1959 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1960 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1961 
1962 	/*
1963 	 * Skip registration of any hashing algorithms if MD block
1964 	 * is not present.
1965 	 */
1966 	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
1967 		return -ENODEV;
1968 
1969 	/* the low-power MD block (LP256) supports digests only up to SHA-256 */
1970 	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1971 		md_limit = SHA256_DIGEST_SIZE;
1972 
1973 	INIT_LIST_HEAD(&hash_list);
1974 
1975 	/* register crypto algorithms the device supports */
1976 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1977 		struct caam_hash_alg *t_alg;
1978 		struct caam_hash_template *alg = driver_hash + i;
1979 
1980 		/* If MD size is not supported by device, skip registration */
1981 		if (alg->template_ahash.halg.digestsize > md_limit)
1982 			continue;
1983 
1984 		/* register hmac version */
1985 		t_alg = caam_hash_alloc(alg, true);
1986 		if (IS_ERR(t_alg)) {
1987 			err = PTR_ERR(t_alg);
1988 			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
1989 			continue;
1990 		}
1991 
1992 		err = crypto_register_ahash(&t_alg->ahash_alg);
1993 		if (err) {
1994 			pr_warn("%s alg registration failed: %d\n",
1995 				t_alg->ahash_alg.halg.base.cra_driver_name,
1996 				err);
1997 			kfree(t_alg);
1998 		} else {
1999 			list_add_tail(&t_alg->entry, &hash_list);
		}
2000 
2001 		/* register unkeyed version */
2002 		t_alg = caam_hash_alloc(alg, false);
2003 		if (IS_ERR(t_alg)) {
2004 			err = PTR_ERR(t_alg);
2005 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2006 			continue;
2007 		}
2008 
2009 		err = crypto_register_ahash(&t_alg->ahash_alg);
2010 		if (err) {
2011 			pr_warn("%s alg registration failed: %d\n",
2012 				t_alg->ahash_alg.halg.base.cra_driver_name,
2013 				err);
2014 			kfree(t_alg);
2015 		} else {
2016 			list_add_tail(&t_alg->entry, &hash_list);
		}
2017 	}
2018 
2019 	return err;
2020 }
2021 
2022 module_init(caam_algapi_hash_init);
2023 module_exit(caam_algapi_hash_exit);
2024 
2025 MODULE_LICENSE("GPL");
2026 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2027 MODULE_AUTHOR("Freescale Semiconductor - NMG");
2028