// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
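
/*
 * Illustrative only (not part of the driver): using the desc_constr.h
 * helpers found throughout this file, a job descriptor of the shape above
 * is put together roughly like this, assuming the source and destination
 * buffers are already DMA-mapped:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 */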

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
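
/*
 * e.g. for SHA-256 the running context is HASH_MSG_LEN + SHA256_DIGEST_SIZE
 * = 8 + 32 = 40 bytes: the 32-byte intermediate digest plus an 8-byte
 * running message length that MDHA keeps between jobs.
 */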

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
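
/*
 * The update/final/finup hooks above implement a small state machine:
 * ahash_init() points them at the *_first/*_no_ctx handlers, and once a
 * first full block has been pushed through the engine they are switched
 * to the *_ctx variants that carry the running digest in caam_ctx. A
 * request that never accumulates a full block stays on the no-ctx path.
 */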

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
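
/*
 * A minimal suspend/resume sketch (illustrative only): callers reach this
 * structure through the generic ahash export/import API, which lands in
 * ahash_export()/ahash_import() near the bottom of this file:
 *
 *	u8 state[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 */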

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
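
/*
 * e.g. the "cmac(aes)" template at the end of this file registers with
 * OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, so is_cmac_aes() is true for it,
 * and false for "xcbc(aes)", which uses OP_ALG_AAI_XCBC_MAC instead.
 */
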
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
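
/*
 * Illustrative only: the ctx-carrying update path builds its link table by
 * chaining these helpers, e.g. running context first, then any buffered
 * bytes, then the request scatterlist:
 *
 *	ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg,
 *			   DMA_BIDIRECTIONAL);
 *	buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
 *	sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 2, 0);
 */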

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
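
/*
 * Note: the four shared descriptors map one-to-one onto MDHA algorithm
 * states: update -> OP_ALG_AS_UPDATE, update_first -> OP_ALG_AS_INIT,
 * fin -> OP_ALG_AS_FINALIZE and digest -> OP_ALG_AS_INITFINAL; only the
 * UPDATE and FINALIZE descriptors import a previous running context
 * (the "true" boolean passed to cnstr_shdsc_ahash() above).
 */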

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
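
/*
 * Worked example: for hmac(sha256) the block size is 64 bytes, so a
 * 100-byte user key is first pushed through this one-shot hash job and
 * replaced by its 32-byte SHA-256 digest before split-key generation,
 * as RFC 2104 prescribes for over-long HMAC keys.
 */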

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
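
/*
 * e.g. on the ctx-carrying update path with a source scatterlist that maps
 * to three segments, ahash_edesc_alloc() below is asked for
 * pad_sg_nents(2 + 3) entries (running context + buffered bytes + the
 * three segments), and the allocation is sizeof(struct ahash_edesc) +
 * sg_num * sizeof(struct sec4_sg_entry), with the link table living in
 * the trailing flexible array.
 */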

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	/* latch the backlog flag before the edesc is freed below */
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	/* latch the backlog flag before the edesc is freed below */
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR
	 * has up to 1024 entries (more than the 10 entries of the crypto
	 * engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
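
/*
 * Illustrative only: every job submission below funnels through the helper
 * above, e.g.
 *
 *	ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
 *				ctx->ctx_len, DMA_BIDIRECTIONAL);
 *
 * A return of -EINPROGRESS (job ring) or -EBUSY (queued on the crypto
 * engine backlog) means the job is in flight; anything else means it was
 * never submitted and the edesc has already been unmapped and freed.
 */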

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
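
	/*
	 * e.g. with the AES block size of 16: an update that leaves exactly
	 * 32 bytes available hashes only 16 now and buffers the final 16,
	 * because XCBC/CMAC apply a special tweak to the last block and so
	 * must not consume it before finalization.
	 */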

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
1491 
1492 static int ahash_init(struct ahash_request *req)
1493 {
1494 	struct caam_hash_state *state = ahash_request_ctx(req);
1495 
1496 	state->update = ahash_update_first;
1497 	state->finup = ahash_finup_first;
1498 	state->final = ahash_final_no_ctx;
1499 
1500 	state->ctx_dma = 0;
1501 	state->ctx_dma_len = 0;
1502 	state->buf_dma = 0;
1503 	state->buflen = 0;
1504 	state->next_buflen = 0;
1505 
1506 	return 0;
1507 }
1508 
1509 static int ahash_update(struct ahash_request *req)
1510 {
1511 	struct caam_hash_state *state = ahash_request_ctx(req);
1512 
1513 	return state->update(req);
1514 }
1515 
1516 static int ahash_finup(struct ahash_request *req)
1517 {
1518 	struct caam_hash_state *state = ahash_request_ctx(req);
1519 
1520 	return state->finup(req);
1521 }
1522 
1523 static int ahash_final(struct ahash_request *req)
1524 {
1525 	struct caam_hash_state *state = ahash_request_ctx(req);
1526 
1527 	return state->final(req);
1528 }
1529 
1530 static int ahash_export(struct ahash_request *req, void *out)
1531 {
1532 	struct caam_hash_state *state = ahash_request_ctx(req);
1533 	struct caam_export_state *export = out;
1534 	u8 *buf = state->buf;
1535 	int len = state->buflen;
1536 
1537 	memcpy(export->buf, buf, len);
1538 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1539 	export->buflen = len;
1540 	export->update = state->update;
1541 	export->final = state->final;
1542 	export->finup = state->finup;
1543 
1544 	return 0;
1545 }
1546 
1547 static int ahash_import(struct ahash_request *req, const void *in)
1548 {
1549 	struct caam_hash_state *state = ahash_request_ctx(req);
1550 	const struct caam_export_state *export = in;
1551 
1552 	memset(state, 0, sizeof(*state));
1553 	memcpy(state->buf, export->buf, export->buflen);
1554 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1555 	state->buflen = export->buflen;
1556 	state->update = export->update;
1557 	state->final = export->final;
1558 	state->finup = export->finup;
1559 
1560 	return 0;
1561 }
1562 
1563 struct caam_hash_template {
1564 	char name[CRYPTO_MAX_ALG_NAME];
1565 	char driver_name[CRYPTO_MAX_ALG_NAME];
1566 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1567 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1568 	unsigned int blocksize;
1569 	struct ahash_alg template_ahash;
1570 	u32 alg_type;
1571 };
1572 
1573 /* ahash descriptors */
1574 static struct caam_hash_template driver_hash[] = {
1575 	{
1576 		.name = "sha1",
1577 		.driver_name = "sha1-caam",
1578 		.hmac_name = "hmac(sha1)",
1579 		.hmac_driver_name = "hmac-sha1-caam",
1580 		.blocksize = SHA1_BLOCK_SIZE,
1581 		.template_ahash = {
1582 			.init = ahash_init,
1583 			.update = ahash_update,
1584 			.final = ahash_final,
1585 			.finup = ahash_finup,
1586 			.digest = ahash_digest,
1587 			.export = ahash_export,
1588 			.import = ahash_import,
1589 			.setkey = ahash_setkey,
1590 			.halg = {
1591 				.digestsize = SHA1_DIGEST_SIZE,
1592 				.statesize = sizeof(struct caam_export_state),
1593 			},
1594 		},
1595 		.alg_type = OP_ALG_ALGSEL_SHA1,
1596 	}, {
1597 		.name = "sha224",
1598 		.driver_name = "sha224-caam",
1599 		.hmac_name = "hmac(sha224)",
1600 		.hmac_driver_name = "hmac-sha224-caam",
1601 		.blocksize = SHA224_BLOCK_SIZE,
1602 		.template_ahash = {
1603 			.init = ahash_init,
1604 			.update = ahash_update,
1605 			.final = ahash_final,
1606 			.finup = ahash_finup,
1607 			.digest = ahash_digest,
1608 			.export = ahash_export,
1609 			.import = ahash_import,
1610 			.setkey = ahash_setkey,
1611 			.halg = {
1612 				.digestsize = SHA224_DIGEST_SIZE,
1613 				.statesize = sizeof(struct caam_export_state),
1614 			},
1615 		},
1616 		.alg_type = OP_ALG_ALGSEL_SHA224,
1617 	}, {
1618 		.name = "sha256",
1619 		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		/* AES-XCBC-MAC is keyed only; it reuses the hmac_* fields */
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		/* AES-CMAC is keyed only; it reuses the hmac_* fields */
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
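
/*
 * Each template above is registered in up to two flavours: a keyed (HMAC or
 * AES-MAC) version and, except for the AES-based MACs, an unkeyed plain
 * hash. As an illustration (not part of the driver), the sha256 entry
 * produces:
 *
 *	keyed:   cra_name "hmac(sha256)", cra_driver_name "hmac-sha256-caam"
 *	unkeyed: cra_name "sha256",       cra_driver_name "sha256-caam"
 *
 * both at cra_priority CAAM_CRA_PRIORITY, so they are preferred over the
 * generic software implementations.
 */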

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
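
/*
 * The generic ahash_alg is embedded in caam_hash_alg so that the
 * driver-private data (alg_type) can be recovered from the crypto_alg
 * handed to ->cra_init(). Sketch of the container_of() chain used in
 * caam_hash_cra_init() below:
 *
 *	halg      = container_of(base, struct hash_alg_common, base);
 *	alg       = container_of(halg, struct ahash_alg, halg);
 *	caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg);
 */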

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 use the internal state size of SHA-256 (32)
	 * and SHA-512 (64) respectively, not their truncated digest size.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		/* XCBC: derived keys are written back into ctx->key by CAAM */
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	/*
	 * Map all four shared descriptors with a single mapping: they sit
	 * back to back (cacheline aligned) in the context, from
	 * sh_desc_update up to, but not including, the key.
	 */
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
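
/*
 * Worked example for the runninglen[] lookup above (illustrative only):
 * OP_ALG_ALGSEL_SHA256 carries 3 in the low nibble of its ALGSEL field, so
 *
 *	ctx->ctx_len = runninglen[3]
 *		     = HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40
 *
 * i.e. the running digest plus the 64-bit message length counter that
 * MDHA maintains alongside it.
 */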

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

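	/*
	 * caam_algapi_hash_init() returns before INIT_LIST_HEAD() when no
	 * MD block is present, so the statically allocated list head may
	 * still be zeroed (i.e. ->next == NULL) here.
	 */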
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		/*
		 * The unkeyed variant must not advertise ->setkey();
		 * caam_hash_cra_init() also keys off alg->setkey to build
		 * the shared descriptors immediately.
		 */
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
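
/*
 * Usage sketch (illustrative only): caam_algapi_hash_init() below calls
 * this once per template and flavour, e.g.
 *
 *	t_alg = caam_hash_alloc(&driver_hash[i], true);   // "hmac(sha256)"
 *	t_alg = caam_hash_alloc(&driver_hash[i], false);  // "sha256"
 *
 * The caller owns the allocation and must kfree() it if registration
 * fails, as done in the loop below.
 */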

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		/* Era < 10: MD version/instantiation are in perfmon CHA regs */
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		/* Era >= 10: each CHA has its own version register */
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* A low-power MD block (LP256) caps the digest size at 256 bits */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* AES-based MACs have no unkeyed flavour to register */
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}
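/*
 * Example of the md_limit gating above (illustrative only): on a part whose
 * MD block is the LP256 variant, md_limit drops to SHA256_DIGEST_SIZE (32),
 * so the sha384 and sha512 templates (digest sizes 48 and 64) are skipped
 * entirely, while md5/sha1/sha224/sha256 and the AES-based MACs (which are
 * not MDHA-backed) are still registered.
 */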