xref: /openbmc/linux/drivers/crypto/caam/caamhash.c (revision a2fb864c)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4  *
5  * Copyright 2011 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019 NXP
7  *
8  * Based on caamalg.c crypto API driver.
9  *
10  * relationship of digest job descriptor or first job descriptor after init to
11  * shared descriptors:
12  *
13  * ---------------                     ---------------
14  * | JobDesc #1  |-------------------->|  ShareDesc  |
15  * | *(packet 1) |                     |  (hashKey)  |
16  * ---------------                     | (operation) |
17  *                                     ---------------
18  *
19  * relationship of subsequent job descriptors to shared descriptors:
20  *
21  * ---------------                     ---------------
22  * | JobDesc #2  |-------------------->|  ShareDesc  |
23  * | *(packet 2) |      |------------->|  (hashKey)  |
24  * ---------------      |    |-------->| (operation) |
25  *       .              |    |         | (load ctx2) |
26  *       .              |    |         ---------------
27  * ---------------      |    |
28  * | JobDesc #3  |------|    |
29  * | *(packet 3) |           |
30  * ---------------           |
31  *       .                   |
32  *       .                   |
33  * ---------------           |
34  * | JobDesc #4  |------------
35  * | *(packet 4) |
36  * ---------------
37  *
38  * The SharedDesc never changes for a connection unless rekeyed, but
39  * each packet will likely be in a different place. So all we need
40  * to know to process the packet is where the input is, where the
41  * output goes, and what context we want to process with. Context is
42  * in the SharedDesc, packet references in the JobDesc.
43  *
44  * So, a job desc looks like:
45  *
46  * ---------------------
47  * | Header            |
48  * | ShareDesc Pointer |
49  * | SEQ_OUT_PTR       |
50  * | (output buffer)   |
51  * | (output length)   |
52  * | SEQ_IN_PTR        |
53  * | (input buffer)    |
54  * | (input length)    |
55  * ---------------------
56  */
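/*
 * Illustrative only (a sketch, not a verbatim excerpt of the code below):
 * the job descriptor layout above is produced with the desc_constr helpers
 * used throughout this file, where src_dma/dst_dma stand for caller-mapped
 * DMA addresses:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, options);
 */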
57 
58 #include "compat.h"
59 
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 
69 #define CAAM_CRA_PRIORITY		3000
70 
71 /* max hash key is max split key size */
72 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
73 
74 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
75 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
76 
77 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
78 					 CAAM_MAX_HASH_KEY_SIZE)
79 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
80 
81 /* caam context sizes for hashes: running digest + 8 */
82 #define HASH_MSG_LEN			8
83 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
84 
85 static struct list_head hash_list;
86 
87 /* ahash per-session context */
88 struct caam_hash_ctx {
89 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
90 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
91 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
92 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
93 	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
94 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
95 	dma_addr_t sh_desc_update_first_dma;
96 	dma_addr_t sh_desc_fin_dma;
97 	dma_addr_t sh_desc_digest_dma;
98 	enum dma_data_direction dir;
99 	struct device *jrdev;
100 	int ctx_len;
101 	struct alginfo adata;
102 };
103 
104 /* ahash state */
105 struct caam_hash_state {
106 	dma_addr_t buf_dma;
107 	dma_addr_t ctx_dma;
108 	int ctx_dma_len;
109 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
110 	int buflen_0;
111 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
112 	int buflen_1;
113 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
114 	int (*update)(struct ahash_request *req);
115 	int (*final)(struct ahash_request *req);
116 	int (*finup)(struct ahash_request *req);
117 	int current_buf;
118 };
119 
120 struct caam_export_state {
121 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
122 	u8 caam_ctx[MAX_CTX_LEN];
123 	int buflen;
124 	int (*update)(struct ahash_request *req);
125 	int (*final)(struct ahash_request *req);
126 	int (*finup)(struct ahash_request *req);
127 };
128 
129 static inline void switch_buf(struct caam_hash_state *state)
130 {
131 	state->current_buf ^= 1;
132 }
133 
134 static inline u8 *current_buf(struct caam_hash_state *state)
135 {
136 	return state->current_buf ? state->buf_1 : state->buf_0;
137 }
138 
139 static inline u8 *alt_buf(struct caam_hash_state *state)
140 {
141 	return state->current_buf ? state->buf_0 : state->buf_1;
142 }
143 
144 static inline int *current_buflen(struct caam_hash_state *state)
145 {
146 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
147 }
148 
149 static inline int *alt_buflen(struct caam_hash_state *state)
150 {
151 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
152 }
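/*
 * Informal sketch of how the two buffers above cooperate (see
 * ahash_update_ctx() and ahash_done_bi() for the real flow): bytes that do
 * not fill a complete block are copied into the alternate buffer, the job is
 * enqueued, and the completion callback switches buffers so the leftover
 * data becomes the current buffer of the next request. Roughly:
 *
 *	scatterwalk_map_and_copy(alt_buf(state), req->src, offset, rem, 0);
 *	caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
 *	...and later, in ahash_done_bi(): switch_buf(state);
 *
 * (offset/rem are placeholders for the values computed by the caller.)
 */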
153 
154 static inline bool is_cmac_aes(u32 algtype)
155 {
156 	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
157 	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
158 }
159 /* Common job descriptor seq in/out ptr routines */
160 
161 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
162 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
163 				      struct caam_hash_state *state,
164 				      int ctx_len)
165 {
166 	state->ctx_dma_len = ctx_len;
167 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
168 					ctx_len, DMA_FROM_DEVICE);
169 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
170 		dev_err(jrdev, "unable to map ctx\n");
171 		state->ctx_dma = 0;
172 		return -ENOMEM;
173 	}
174 
175 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
176 
177 	return 0;
178 }
179 
180 /* Map current buffer in state (if length > 0) and put it in link table */
181 static inline int buf_map_to_sec4_sg(struct device *jrdev,
182 				     struct sec4_sg_entry *sec4_sg,
183 				     struct caam_hash_state *state)
184 {
185 	int buflen = *current_buflen(state);
186 
187 	if (!buflen)
188 		return 0;
189 
190 	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
191 					DMA_TO_DEVICE);
192 	if (dma_mapping_error(jrdev, state->buf_dma)) {
193 		dev_err(jrdev, "unable to map buf\n");
194 		state->buf_dma = 0;
195 		return -ENOMEM;
196 	}
197 
198 	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
199 
200 	return 0;
201 }
202 
203 /* Map state->caam_ctx, and add it to link table */
204 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
205 				     struct caam_hash_state *state, int ctx_len,
206 				     struct sec4_sg_entry *sec4_sg, u32 flag)
207 {
208 	state->ctx_dma_len = ctx_len;
209 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
210 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
211 		dev_err(jrdev, "unable to map ctx\n");
212 		state->ctx_dma = 0;
213 		return -ENOMEM;
214 	}
215 
216 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
217 
218 	return 0;
219 }
220 
221 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
222 {
223 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
224 	int digestsize = crypto_ahash_digestsize(ahash);
225 	struct device *jrdev = ctx->jrdev;
226 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
227 	u32 *desc;
228 
229 	ctx->adata.key_virt = ctx->key;
230 
231 	/* ahash_update shared descriptor */
232 	desc = ctx->sh_desc_update;
233 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
234 			  ctx->ctx_len, true, ctrlpriv->era);
235 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
236 				   desc_bytes(desc), ctx->dir);
237 
238 	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
239 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
240 			     1);
241 
242 	/* ahash_update_first shared descriptor */
243 	desc = ctx->sh_desc_update_first;
244 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
245 			  ctx->ctx_len, false, ctrlpriv->era);
246 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
247 				   desc_bytes(desc), ctx->dir);
248 	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
249 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
250 			     desc_bytes(desc), 1);
251 
252 	/* ahash_final shared descriptor */
253 	desc = ctx->sh_desc_fin;
254 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
255 			  ctx->ctx_len, true, ctrlpriv->era);
256 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
257 				   desc_bytes(desc), ctx->dir);
258 
259 	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
260 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
261 			     desc_bytes(desc), 1);
262 
263 	/* ahash_digest shared descriptor */
264 	desc = ctx->sh_desc_digest;
265 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
266 			  ctx->ctx_len, false, ctrlpriv->era);
267 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
268 				   desc_bytes(desc), ctx->dir);
269 
270 	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
271 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
272 			     desc_bytes(desc), 1);
273 
274 	return 0;
275 }
276 
277 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
278 {
279 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
280 	int digestsize = crypto_ahash_digestsize(ahash);
281 	struct device *jrdev = ctx->jrdev;
282 	u32 *desc;
283 
284 	/* shared descriptor for ahash_update */
285 	desc = ctx->sh_desc_update;
286 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
287 			    ctx->ctx_len, ctx->ctx_len);
288 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
289 				   desc_bytes(desc), ctx->dir);
290 	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
291 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
292 			     1);
293 
294 	/* shared descriptor for ahash_{final,finup} */
295 	desc = ctx->sh_desc_fin;
296 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
297 			    digestsize, ctx->ctx_len);
298 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
299 				   desc_bytes(desc), ctx->dir);
300 	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
301 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
302 			     1);
303 
304 	/* key is immediate data for INIT and INITFINAL states */
305 	ctx->adata.key_virt = ctx->key;
306 
307 	/* shared descriptor for first invocation of ahash_update */
308 	desc = ctx->sh_desc_update_first;
309 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
310 			    ctx->ctx_len);
311 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
312 				   desc_bytes(desc), ctx->dir);
313 	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
314 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
315 			     desc_bytes(desc), 1);
316 
317 	/* shared descriptor for ahash_digest */
318 	desc = ctx->sh_desc_digest;
319 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
320 			    digestsize, ctx->ctx_len);
321 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
322 				   desc_bytes(desc), ctx->dir);
323 	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
324 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
325 			     1);
326 	return 0;
327 }
328 
329 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
330 {
331 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
332 	int digestsize = crypto_ahash_digestsize(ahash);
333 	struct device *jrdev = ctx->jrdev;
334 	u32 *desc;
335 
336 	/* shared descriptor for ahash_update */
337 	desc = ctx->sh_desc_update;
338 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
339 			    ctx->ctx_len, ctx->ctx_len);
340 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
341 				   desc_bytes(desc), ctx->dir);
342 	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
343 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
344 			     desc_bytes(desc), 1);
345 
346 	/* shared descriptor for ahash_{final,finup} */
347 	desc = ctx->sh_desc_fin;
348 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
349 			    digestsize, ctx->ctx_len);
350 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
351 				   desc_bytes(desc), ctx->dir);
352 	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
353 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
354 			     desc_bytes(desc), 1);
355 
356 	/* shared descriptor for first invocation of ahash_update */
357 	desc = ctx->sh_desc_update_first;
358 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
359 			    ctx->ctx_len);
360 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
361 				   desc_bytes(desc), ctx->dir);
362 	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
363 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
364 			     desc_bytes(desc), 1);
365 
366 	/* shared descriptor for ahash_digest */
367 	desc = ctx->sh_desc_digest;
368 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
369 			    digestsize, ctx->ctx_len);
370 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
371 				   desc_bytes(desc), ctx->dir);
372 	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
373 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
374 			     desc_bytes(desc), 1);
375 
376 	return 0;
377 }
378 
379 /* Hash the key down to digestsize bytes if it is too long */
380 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
381 			   u32 digestsize)
382 {
383 	struct device *jrdev = ctx->jrdev;
384 	u32 *desc;
385 	struct split_key_result result;
386 	dma_addr_t key_dma;
387 	int ret;
388 
389 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
390 	if (!desc) {
391 		dev_err(jrdev, "unable to allocate key input memory\n");
392 		return -ENOMEM;
393 	}
394 
395 	init_job_desc(desc, 0);
396 
397 	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
398 	if (dma_mapping_error(jrdev, key_dma)) {
399 		dev_err(jrdev, "unable to map key memory\n");
400 		kfree(desc);
401 		return -ENOMEM;
402 	}
403 
404 	/* Job descriptor to perform unkeyed hash on key_in */
405 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
406 			 OP_ALG_AS_INITFINAL);
407 	append_seq_in_ptr(desc, key_dma, *keylen, 0);
408 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
409 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
410 	append_seq_out_ptr(desc, key_dma, digestsize, 0);
411 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
412 			 LDST_SRCDST_BYTE_CONTEXT);
413 
414 	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
415 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
416 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
417 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
418 			     1);
419 
420 	result.err = 0;
421 	init_completion(&result.completion);
422 
423 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
424 	if (!ret) {
425 		/* in progress */
426 		wait_for_completion(&result.completion);
427 		ret = result.err;
428 
429 		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
430 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
431 				     digestsize, 1);
432 	}
433 	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
434 
435 	*keylen = digestsize;
436 
437 	kfree(desc);
438 
439 	return ret;
440 }
441 
442 static int ahash_setkey(struct crypto_ahash *ahash,
443 			const u8 *key, unsigned int keylen)
444 {
445 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
446 	struct device *jrdev = ctx->jrdev;
447 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
448 	int digestsize = crypto_ahash_digestsize(ahash);
449 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
450 	int ret;
451 	u8 *hashed_key = NULL;
452 
453 	dev_dbg(jrdev, "keylen %d\n", keylen);
454 
455 	if (keylen > blocksize) {
456 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
457 		if (!hashed_key)
458 			return -ENOMEM;
459 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
460 		if (ret)
461 			goto bad_free_key;
462 		key = hashed_key;
463 	}
464 
465 	/*
466 	 * If DKP is supported, use it in the shared descriptor to generate
467 	 * the split key.
468 	 */
469 	if (ctrlpriv->era >= 6) {
470 		ctx->adata.key_inline = true;
471 		ctx->adata.keylen = keylen;
472 		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
473 						      OP_ALG_ALGSEL_MASK);
474 
475 		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
476 			goto bad_free_key;
477 
478 		memcpy(ctx->key, key, keylen);
479 	} else {
480 		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
481 				    keylen, CAAM_MAX_HASH_KEY_SIZE);
482 		if (ret)
483 			goto bad_free_key;
484 	}
485 
486 	kfree(hashed_key);
487 	return ahash_set_sh_desc(ahash);
488  bad_free_key:
489 	kfree(hashed_key);
490 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
491 	return -EINVAL;
492 }
493 
494 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
495 			unsigned int keylen)
496 {
497 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
498 	struct device *jrdev = ctx->jrdev;
499 
500 	if (keylen != AES_KEYSIZE_128) {
501 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
502 		return -EINVAL;
503 	}
504 
505 	memcpy(ctx->key, key, keylen);
506 	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
507 				   DMA_TO_DEVICE);
508 	ctx->adata.keylen = keylen;
509 
510 	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
511 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
512 
513 	return axcbc_set_sh_desc(ahash);
514 }
515 
516 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
517 			unsigned int keylen)
518 {
519 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
520 	int err;
521 
522 	err = aes_check_keylen(keylen);
523 	if (err) {
524 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
525 		return err;
526 	}
527 
528 	/* key is immediate data for all cmac shared descriptors */
529 	ctx->adata.key_virt = key;
530 	ctx->adata.keylen = keylen;
531 
532 	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
533 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
534 
535 	return acmac_set_sh_desc(ahash);
536 }
537 
538 /*
539  * ahash_edesc - s/w-extended ahash descriptor
540  * @sec4_sg_dma: physical mapped address of h/w link table
541  * @src_nents: number of segments in input scatterlist
542  * @sec4_sg_bytes: length of dma mapped sec4_sg space
543  * @hw_desc: the h/w job descriptor followed by any referenced link tables
544  * @sec4_sg: h/w link table
545  */
546 struct ahash_edesc {
547 	dma_addr_t sec4_sg_dma;
548 	int src_nents;
549 	int sec4_sg_bytes;
550 	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
551 	struct sec4_sg_entry sec4_sg[0];
552 };
553 
554 static inline void ahash_unmap(struct device *dev,
555 			struct ahash_edesc *edesc,
556 			struct ahash_request *req, int dst_len)
557 {
558 	struct caam_hash_state *state = ahash_request_ctx(req);
559 
560 	if (edesc->src_nents)
561 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
562 
563 	if (edesc->sec4_sg_bytes)
564 		dma_unmap_single(dev, edesc->sec4_sg_dma,
565 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
566 
567 	if (state->buf_dma) {
568 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
569 				 DMA_TO_DEVICE);
570 		state->buf_dma = 0;
571 	}
572 }
573 
574 static inline void ahash_unmap_ctx(struct device *dev,
575 			struct ahash_edesc *edesc,
576 			struct ahash_request *req, int dst_len, u32 flag)
577 {
578 	struct caam_hash_state *state = ahash_request_ctx(req);
579 
580 	if (state->ctx_dma) {
581 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
582 		state->ctx_dma = 0;
583 	}
584 	ahash_unmap(dev, edesc, req, dst_len);
585 }
586 
587 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
588 		       void *context)
589 {
590 	struct ahash_request *req = context;
591 	struct ahash_edesc *edesc;
592 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
593 	int digestsize = crypto_ahash_digestsize(ahash);
594 	struct caam_hash_state *state = ahash_request_ctx(req);
595 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
596 	int ecode = 0;
597 
598 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
599 
600 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
601 	if (err)
602 		ecode = caam_jr_strstatus(jrdev, err);
603 
604 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
605 	memcpy(req->result, state->caam_ctx, digestsize);
606 	kfree(edesc);
607 
608 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
609 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
610 			     ctx->ctx_len, 1);
611 
612 	req->base.complete(&req->base, ecode);
613 }
614 
615 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
616 			    void *context)
617 {
618 	struct ahash_request *req = context;
619 	struct ahash_edesc *edesc;
620 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
621 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
622 	struct caam_hash_state *state = ahash_request_ctx(req);
623 	int digestsize = crypto_ahash_digestsize(ahash);
624 	int ecode = 0;
625 
626 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
627 
628 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
629 	if (err)
630 		ecode = caam_jr_strstatus(jrdev, err);
631 
632 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
633 	switch_buf(state);
634 	kfree(edesc);
635 
636 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
637 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
638 			     ctx->ctx_len, 1);
639 	if (req->result)
640 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
641 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
642 				     digestsize, 1);
643 
644 	req->base.complete(&req->base, ecode);
645 }
646 
647 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
648 			       void *context)
649 {
650 	struct ahash_request *req = context;
651 	struct ahash_edesc *edesc;
652 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
653 	int digestsize = crypto_ahash_digestsize(ahash);
654 	struct caam_hash_state *state = ahash_request_ctx(req);
655 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
656 	int ecode = 0;
657 
658 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
659 
660 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
661 	if (err)
662 		ecode = caam_jr_strstatus(jrdev, err);
663 
664 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
665 	memcpy(req->result, state->caam_ctx, digestsize);
666 	kfree(edesc);
667 
668 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
669 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
670 			     ctx->ctx_len, 1);
671 
672 	req->base.complete(&req->base, ecode);
673 }
674 
675 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
676 			       void *context)
677 {
678 	struct ahash_request *req = context;
679 	struct ahash_edesc *edesc;
680 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
681 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
682 	struct caam_hash_state *state = ahash_request_ctx(req);
683 	int digestsize = crypto_ahash_digestsize(ahash);
684 	int ecode = 0;
685 
686 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
687 
688 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
689 	if (err)
690 		ecode = caam_jr_strstatus(jrdev, err);
691 
692 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
693 	switch_buf(state);
694 	kfree(edesc);
695 
696 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
697 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
698 			     ctx->ctx_len, 1);
699 	if (req->result)
700 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
701 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
702 				     digestsize, 1);
703 
704 	req->base.complete(&req->base, ecode);
705 }
706 
707 /*
708  * Allocate an extended descriptor, which contains the hardware descriptor
709  * and space for a hardware scatter table containing sg_num entries.
710  */
711 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
712 					     int sg_num, u32 *sh_desc,
713 					     dma_addr_t sh_desc_dma,
714 					     gfp_t flags)
715 {
716 	struct ahash_edesc *edesc;
717 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
718 
719 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
720 	if (!edesc) {
721 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
722 		return NULL;
723 	}
724 
725 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
726 			     HDR_SHARE_DEFER | HDR_REVERSE);
727 
728 	return edesc;
729 }
730 
731 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
732 			       struct ahash_edesc *edesc,
733 			       struct ahash_request *req, int nents,
734 			       unsigned int first_sg,
735 			       unsigned int first_bytes, size_t to_hash)
736 {
737 	dma_addr_t src_dma;
738 	u32 options;
739 
740 	if (nents > 1 || first_sg) {
741 		struct sec4_sg_entry *sg = edesc->sec4_sg;
742 		unsigned int sgsize = sizeof(*sg) *
743 				      pad_sg_nents(first_sg + nents);
744 
745 		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
746 
747 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
748 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
749 			dev_err(ctx->jrdev, "unable to map S/G table\n");
750 			return -ENOMEM;
751 		}
752 
753 		edesc->sec4_sg_bytes = sgsize;
754 		edesc->sec4_sg_dma = src_dma;
755 		options = LDST_SGF;
756 	} else {
757 		src_dma = sg_dma_address(req->src);
758 		options = 0;
759 	}
760 
761 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
762 			  options);
763 
764 	return 0;
765 }
766 
767 /* submit update job descriptor */
768 static int ahash_update_ctx(struct ahash_request *req)
769 {
770 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
771 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
772 	struct caam_hash_state *state = ahash_request_ctx(req);
773 	struct device *jrdev = ctx->jrdev;
774 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
775 		       GFP_KERNEL : GFP_ATOMIC;
776 	u8 *buf = current_buf(state);
777 	int *buflen = current_buflen(state);
778 	u8 *next_buf = alt_buf(state);
779 	int blocksize = crypto_ahash_blocksize(ahash);
780 	int *next_buflen = alt_buflen(state), last_buflen;
781 	int in_len = *buflen + req->nbytes, to_hash;
782 	u32 *desc;
783 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
784 	struct ahash_edesc *edesc;
785 	int ret = 0;
786 
787 	last_buflen = *next_buflen;
788 	*next_buflen = in_len & (blocksize - 1);
789 	to_hash = in_len - *next_buflen;
790 
791 	/*
792 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
793 	 * keep the last block in the internal buffer
794 	 */
795 	if ((is_xcbc_aes(ctx->adata.algtype) ||
796 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
797 	     (*next_buflen == 0)) {
798 		*next_buflen = blocksize;
799 		to_hash -= blocksize;
800 	}
801 
802 	if (to_hash) {
803 		int pad_nents;
804 		int src_len = req->nbytes - *next_buflen;
805 
806 		src_nents = sg_nents_for_len(req->src, src_len);
807 		if (src_nents < 0) {
808 			dev_err(jrdev, "Invalid number of src SG.\n");
809 			return src_nents;
810 		}
811 
812 		if (src_nents) {
813 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
814 						  DMA_TO_DEVICE);
815 			if (!mapped_nents) {
816 				dev_err(jrdev, "unable to DMA map source\n");
817 				return -ENOMEM;
818 			}
819 		} else {
820 			mapped_nents = 0;
821 		}
822 
823 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
824 		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
825 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
826 
827 		/*
828 		 * allocate space for base edesc and hw desc commands,
829 		 * link tables
830 		 */
831 		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
832 					  ctx->sh_desc_update_dma, flags);
833 		if (!edesc) {
834 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
835 			return -ENOMEM;
836 		}
837 
838 		edesc->src_nents = src_nents;
839 		edesc->sec4_sg_bytes = sec4_sg_bytes;
840 
841 		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
842 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
843 		if (ret)
844 			goto unmap_ctx;
845 
846 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
847 		if (ret)
848 			goto unmap_ctx;
849 
850 		if (mapped_nents)
851 			sg_to_sec4_sg_last(req->src, src_len,
852 					   edesc->sec4_sg + sec4_sg_src_index,
853 					   0);
854 		else
855 			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
856 					    1);
857 
858 		if (*next_buflen)
859 			scatterwalk_map_and_copy(next_buf, req->src,
860 						 to_hash - *buflen,
861 						 *next_buflen, 0);
862 		desc = edesc->hw_desc;
863 
864 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
865 						     sec4_sg_bytes,
866 						     DMA_TO_DEVICE);
867 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
868 			dev_err(jrdev, "unable to map S/G table\n");
869 			ret = -ENOMEM;
870 			goto unmap_ctx;
871 		}
872 
873 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
874 				       to_hash, LDST_SGF);
875 
876 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
877 
878 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
879 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
880 				     desc_bytes(desc), 1);
881 
882 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
883 		if (ret)
884 			goto unmap_ctx;
885 
886 		ret = -EINPROGRESS;
887 	} else if (*next_buflen) {
888 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
889 					 req->nbytes, 0);
890 		*buflen = *next_buflen;
891 		*next_buflen = last_buflen;
892 	}
893 
894 	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
895 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
896 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
897 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
898 			     *next_buflen, 1);
899 
900 	return ret;
901 unmap_ctx:
902 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
903 	kfree(edesc);
904 	return ret;
905 }
906 
907 static int ahash_final_ctx(struct ahash_request *req)
908 {
909 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
910 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
911 	struct caam_hash_state *state = ahash_request_ctx(req);
912 	struct device *jrdev = ctx->jrdev;
913 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
914 		       GFP_KERNEL : GFP_ATOMIC;
915 	int buflen = *current_buflen(state);
916 	u32 *desc;
917 	int sec4_sg_bytes;
918 	int digestsize = crypto_ahash_digestsize(ahash);
919 	struct ahash_edesc *edesc;
920 	int ret;
921 
922 	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
923 			sizeof(struct sec4_sg_entry);
924 
925 	/* allocate space for base edesc and hw desc commands, link tables */
926 	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
927 				  ctx->sh_desc_fin_dma, flags);
928 	if (!edesc)
929 		return -ENOMEM;
930 
931 	desc = edesc->hw_desc;
932 
933 	edesc->sec4_sg_bytes = sec4_sg_bytes;
934 
935 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
936 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
937 	if (ret)
938 		goto unmap_ctx;
939 
940 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
941 	if (ret)
942 		goto unmap_ctx;
943 
944 	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
945 
946 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
947 					    sec4_sg_bytes, DMA_TO_DEVICE);
948 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
949 		dev_err(jrdev, "unable to map S/G table\n");
950 		ret = -ENOMEM;
951 		goto unmap_ctx;
952 	}
953 
954 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
955 			  LDST_SGF);
956 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
957 
958 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
959 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
960 			     1);
961 
962 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
963 	if (ret)
964 		goto unmap_ctx;
965 
966 	return -EINPROGRESS;
967  unmap_ctx:
968 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
969 	kfree(edesc);
970 	return ret;
971 }
972 
973 static int ahash_finup_ctx(struct ahash_request *req)
974 {
975 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
976 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
977 	struct caam_hash_state *state = ahash_request_ctx(req);
978 	struct device *jrdev = ctx->jrdev;
979 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
980 		       GFP_KERNEL : GFP_ATOMIC;
981 	int buflen = *current_buflen(state);
982 	u32 *desc;
983 	int sec4_sg_src_index;
984 	int src_nents, mapped_nents;
985 	int digestsize = crypto_ahash_digestsize(ahash);
986 	struct ahash_edesc *edesc;
987 	int ret;
988 
989 	src_nents = sg_nents_for_len(req->src, req->nbytes);
990 	if (src_nents < 0) {
991 		dev_err(jrdev, "Invalid number of src SG.\n");
992 		return src_nents;
993 	}
994 
995 	if (src_nents) {
996 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
997 					  DMA_TO_DEVICE);
998 		if (!mapped_nents) {
999 			dev_err(jrdev, "unable to DMA map source\n");
1000 			return -ENOMEM;
1001 		}
1002 	} else {
1003 		mapped_nents = 0;
1004 	}
1005 
1006 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1007 
1008 	/* allocate space for base edesc and hw desc commands, link tables */
1009 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1010 				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
1011 				  flags);
1012 	if (!edesc) {
1013 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1014 		return -ENOMEM;
1015 	}
1016 
1017 	desc = edesc->hw_desc;
1018 
1019 	edesc->src_nents = src_nents;
1020 
1021 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1022 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1023 	if (ret)
1024 		goto unmap_ctx;
1025 
1026 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1027 	if (ret)
1028 		goto unmap_ctx;
1029 
1030 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1031 				  sec4_sg_src_index, ctx->ctx_len + buflen,
1032 				  req->nbytes);
1033 	if (ret)
1034 		goto unmap_ctx;
1035 
1036 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1037 
1038 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1039 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1040 			     1);
1041 
1042 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1043 	if (ret)
1044 		goto unmap_ctx;
1045 
1046 	return -EINPROGRESS;
1047  unmap_ctx:
1048 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1049 	kfree(edesc);
1050 	return ret;
1051 }
1052 
1053 static int ahash_digest(struct ahash_request *req)
1054 {
1055 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1056 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1057 	struct caam_hash_state *state = ahash_request_ctx(req);
1058 	struct device *jrdev = ctx->jrdev;
1059 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1060 		       GFP_KERNEL : GFP_ATOMIC;
1061 	u32 *desc;
1062 	int digestsize = crypto_ahash_digestsize(ahash);
1063 	int src_nents, mapped_nents;
1064 	struct ahash_edesc *edesc;
1065 	int ret;
1066 
1067 	state->buf_dma = 0;
1068 
1069 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1070 	if (src_nents < 0) {
1071 		dev_err(jrdev, "Invalid number of src SG.\n");
1072 		return src_nents;
1073 	}
1074 
1075 	if (src_nents) {
1076 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1077 					  DMA_TO_DEVICE);
1078 		if (!mapped_nents) {
1079 			dev_err(jrdev, "unable to map source for DMA\n");
1080 			return -ENOMEM;
1081 		}
1082 	} else {
1083 		mapped_nents = 0;
1084 	}
1085 
1086 	/* allocate space for base edesc and hw desc commands, link tables */
1087 	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
1088 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1089 				  flags);
1090 	if (!edesc) {
1091 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1092 		return -ENOMEM;
1093 	}
1094 
1095 	edesc->src_nents = src_nents;
1096 
1097 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1098 				  req->nbytes);
1099 	if (ret) {
1100 		ahash_unmap(jrdev, edesc, req, digestsize);
1101 		kfree(edesc);
1102 		return ret;
1103 	}
1104 
1105 	desc = edesc->hw_desc;
1106 
1107 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1108 	if (ret) {
1109 		ahash_unmap(jrdev, edesc, req, digestsize);
1110 		kfree(edesc);
1111 		return -ENOMEM;
1112 	}
1113 
1114 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1115 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1116 			     1);
1117 
1118 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1119 	if (!ret) {
1120 		ret = -EINPROGRESS;
1121 	} else {
1122 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1123 		kfree(edesc);
1124 	}
1125 
1126 	return ret;
1127 }
1128 
1129 /* submit ahash final if it is the first job descriptor */
1130 static int ahash_final_no_ctx(struct ahash_request *req)
1131 {
1132 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1133 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1134 	struct caam_hash_state *state = ahash_request_ctx(req);
1135 	struct device *jrdev = ctx->jrdev;
1136 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1137 		       GFP_KERNEL : GFP_ATOMIC;
1138 	u8 *buf = current_buf(state);
1139 	int buflen = *current_buflen(state);
1140 	u32 *desc;
1141 	int digestsize = crypto_ahash_digestsize(ahash);
1142 	struct ahash_edesc *edesc;
1143 	int ret;
1144 
1145 	/* allocate space for base edesc and hw desc commands, link tables */
1146 	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
1147 				  ctx->sh_desc_digest_dma, flags);
1148 	if (!edesc)
1149 		return -ENOMEM;
1150 
1151 	desc = edesc->hw_desc;
1152 
1153 	if (buflen) {
1154 		state->buf_dma = dma_map_single(jrdev, buf, buflen,
1155 						DMA_TO_DEVICE);
1156 		if (dma_mapping_error(jrdev, state->buf_dma)) {
1157 			dev_err(jrdev, "unable to map src\n");
1158 			goto unmap;
1159 		}
1160 
1161 		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1162 	}
1163 
1164 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1165 	if (ret)
1166 		goto unmap;
1167 
1168 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1169 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1170 			     1);
1171 
1172 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1173 	if (!ret) {
1174 		ret = -EINPROGRESS;
1175 	} else {
1176 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1177 		kfree(edesc);
1178 	}
1179 
1180 	return ret;
1181  unmap:
1182 	ahash_unmap(jrdev, edesc, req, digestsize);
1183 	kfree(edesc);
1184 	return -ENOMEM;
1185 
1186 }
1187 
1188 /* submit ahash update if it is the first job descriptor after update */
1189 static int ahash_update_no_ctx(struct ahash_request *req)
1190 {
1191 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1192 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1193 	struct caam_hash_state *state = ahash_request_ctx(req);
1194 	struct device *jrdev = ctx->jrdev;
1195 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1196 		       GFP_KERNEL : GFP_ATOMIC;
1197 	u8 *buf = current_buf(state);
1198 	int *buflen = current_buflen(state);
1199 	int blocksize = crypto_ahash_blocksize(ahash);
1200 	u8 *next_buf = alt_buf(state);
1201 	int *next_buflen = alt_buflen(state);
1202 	int in_len = *buflen + req->nbytes, to_hash;
1203 	int sec4_sg_bytes, src_nents, mapped_nents;
1204 	struct ahash_edesc *edesc;
1205 	u32 *desc;
1206 	int ret = 0;
1207 
1208 	*next_buflen = in_len & (blocksize - 1);
1209 	to_hash = in_len - *next_buflen;
1210 
1211 	/*
1212 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1213 	 * keep the last block in the internal buffer
1214 	 */
1215 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1216 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1217 	     (*next_buflen == 0)) {
1218 		*next_buflen = blocksize;
1219 		to_hash -= blocksize;
1220 	}
1221 
1222 	if (to_hash) {
1223 		int pad_nents;
1224 		int src_len = req->nbytes - *next_buflen;
1225 
1226 		src_nents = sg_nents_for_len(req->src, src_len);
1227 		if (src_nents < 0) {
1228 			dev_err(jrdev, "Invalid number of src SG.\n");
1229 			return src_nents;
1230 		}
1231 
1232 		if (src_nents) {
1233 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1234 						  DMA_TO_DEVICE);
1235 			if (!mapped_nents) {
1236 				dev_err(jrdev, "unable to DMA map source\n");
1237 				return -ENOMEM;
1238 			}
1239 		} else {
1240 			mapped_nents = 0;
1241 		}
1242 
1243 		pad_nents = pad_sg_nents(1 + mapped_nents);
1244 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1245 
1246 		/*
1247 		 * allocate space for base edesc and hw desc commands,
1248 		 * link tables
1249 		 */
1250 		edesc = ahash_edesc_alloc(ctx, pad_nents,
1251 					  ctx->sh_desc_update_first,
1252 					  ctx->sh_desc_update_first_dma,
1253 					  flags);
1254 		if (!edesc) {
1255 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1256 			return -ENOMEM;
1257 		}
1258 
1259 		edesc->src_nents = src_nents;
1260 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1261 
1262 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1263 		if (ret)
1264 			goto unmap_ctx;
1265 
1266 		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1267 
1268 		if (*next_buflen) {
1269 			scatterwalk_map_and_copy(next_buf, req->src,
1270 						 to_hash - *buflen,
1271 						 *next_buflen, 0);
1272 		}
1273 
1274 		desc = edesc->hw_desc;
1275 
1276 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1277 						    sec4_sg_bytes,
1278 						    DMA_TO_DEVICE);
1279 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1280 			dev_err(jrdev, "unable to map S/G table\n");
1281 			ret = -ENOMEM;
1282 			goto unmap_ctx;
1283 		}
1284 
1285 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1286 
1287 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1288 		if (ret)
1289 			goto unmap_ctx;
1290 
1291 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1292 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1293 				     desc_bytes(desc), 1);
1294 
1295 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1296 		if (ret)
1297 			goto unmap_ctx;
1298 
1299 		ret = -EINPROGRESS;
1300 		state->update = ahash_update_ctx;
1301 		state->finup = ahash_finup_ctx;
1302 		state->final = ahash_final_ctx;
1303 	} else if (*next_buflen) {
1304 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1305 					 req->nbytes, 0);
1306 		*buflen = *next_buflen;
1307 		*next_buflen = 0;
1308 	}
1309 
1310 	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
1311 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1312 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1313 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1314 			     1);
1315 
1316 	return ret;
1317  unmap_ctx:
1318 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1319 	kfree(edesc);
1320 	return ret;
1321 }
1322 
1323 /* submit ahash finup if it is the first job descriptor after update */
1324 static int ahash_finup_no_ctx(struct ahash_request *req)
1325 {
1326 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1327 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1328 	struct caam_hash_state *state = ahash_request_ctx(req);
1329 	struct device *jrdev = ctx->jrdev;
1330 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1331 		       GFP_KERNEL : GFP_ATOMIC;
1332 	int buflen = *current_buflen(state);
1333 	u32 *desc;
1334 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1335 	int digestsize = crypto_ahash_digestsize(ahash);
1336 	struct ahash_edesc *edesc;
1337 	int ret;
1338 
1339 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1340 	if (src_nents < 0) {
1341 		dev_err(jrdev, "Invalid number of src SG.\n");
1342 		return src_nents;
1343 	}
1344 
1345 	if (src_nents) {
1346 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1347 					  DMA_TO_DEVICE);
1348 		if (!mapped_nents) {
1349 			dev_err(jrdev, "unable to DMA map source\n");
1350 			return -ENOMEM;
1351 		}
1352 	} else {
1353 		mapped_nents = 0;
1354 	}
1355 
1356 	sec4_sg_src_index = 2;
1357 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1358 			 sizeof(struct sec4_sg_entry);
1359 
1360 	/* allocate space for base edesc and hw desc commands, link tables */
1361 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1362 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1363 				  flags);
1364 	if (!edesc) {
1365 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1366 		return -ENOMEM;
1367 	}
1368 
1369 	desc = edesc->hw_desc;
1370 
1371 	edesc->src_nents = src_nents;
1372 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1373 
1374 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1375 	if (ret)
1376 		goto unmap;
1377 
1378 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1379 				  req->nbytes);
1380 	if (ret) {
1381 		dev_err(jrdev, "unable to map S/G table\n");
1382 		goto unmap;
1383 	}
1384 
1385 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1386 	if (ret)
1387 		goto unmap;
1388 
1389 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1390 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1391 			     1);
1392 
1393 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1394 	if (!ret) {
1395 		ret = -EINPROGRESS;
1396 	} else {
1397 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1398 		kfree(edesc);
1399 	}
1400 
1401 	return ret;
1402  unmap:
1403 	ahash_unmap(jrdev, edesc, req, digestsize);
1404 	kfree(edesc);
1405 	return -ENOMEM;
1406 
1407 }
1408 
1409 /* submit first update job descriptor after init */
1410 static int ahash_update_first(struct ahash_request *req)
1411 {
1412 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1413 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1414 	struct caam_hash_state *state = ahash_request_ctx(req);
1415 	struct device *jrdev = ctx->jrdev;
1416 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1417 		       GFP_KERNEL : GFP_ATOMIC;
1418 	u8 *next_buf = alt_buf(state);
1419 	int *next_buflen = alt_buflen(state);
1420 	int to_hash;
1421 	int blocksize = crypto_ahash_blocksize(ahash);
1422 	u32 *desc;
1423 	int src_nents, mapped_nents;
1424 	struct ahash_edesc *edesc;
1425 	int ret = 0;
1426 
1427 	*next_buflen = req->nbytes & (blocksize - 1);
1428 	to_hash = req->nbytes - *next_buflen;
1429 
1430 	/*
1431 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1432 	 * keep the last block in the internal buffer
1433 	 */
1434 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1435 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1436 	     (*next_buflen == 0)) {
1437 		*next_buflen = blocksize;
1438 		to_hash -= blocksize;
1439 	}
1440 
1441 	if (to_hash) {
1442 		src_nents = sg_nents_for_len(req->src,
1443 					     req->nbytes - *next_buflen);
1444 		if (src_nents < 0) {
1445 			dev_err(jrdev, "Invalid number of src SG.\n");
1446 			return src_nents;
1447 		}
1448 
1449 		if (src_nents) {
1450 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1451 						  DMA_TO_DEVICE);
1452 			if (!mapped_nents) {
1453 				dev_err(jrdev, "unable to map source for DMA\n");
1454 				return -ENOMEM;
1455 			}
1456 		} else {
1457 			mapped_nents = 0;
1458 		}
1459 
1460 		/*
1461 		 * allocate space for base edesc and hw desc commands,
1462 		 * link tables
1463 		 */
1464 		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
1465 					  mapped_nents : 0,
1466 					  ctx->sh_desc_update_first,
1467 					  ctx->sh_desc_update_first_dma,
1468 					  flags);
1469 		if (!edesc) {
1470 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1471 			return -ENOMEM;
1472 		}
1473 
1474 		edesc->src_nents = src_nents;
1475 
1476 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1477 					  to_hash);
1478 		if (ret)
1479 			goto unmap_ctx;
1480 
1481 		if (*next_buflen)
1482 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1483 						 *next_buflen, 0);
1484 
1485 		desc = edesc->hw_desc;
1486 
1487 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1488 		if (ret)
1489 			goto unmap_ctx;
1490 
1491 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1492 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1493 				     desc_bytes(desc), 1);
1494 
1495 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1496 		if (ret)
1497 			goto unmap_ctx;
1498 
1499 		ret = -EINPROGRESS;
1500 		state->update = ahash_update_ctx;
1501 		state->finup = ahash_finup_ctx;
1502 		state->final = ahash_final_ctx;
1503 	} else if (*next_buflen) {
1504 		state->update = ahash_update_no_ctx;
1505 		state->finup = ahash_finup_no_ctx;
1506 		state->final = ahash_final_no_ctx;
1507 		scatterwalk_map_and_copy(next_buf, req->src, 0,
1508 					 req->nbytes, 0);
1509 		switch_buf(state);
1510 	}
1511 
1512 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1513 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1514 			     1);
1515 
1516 	return ret;
1517  unmap_ctx:
1518 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1519 	kfree(edesc);
1520 	return ret;
1521 }
1522 
1523 static int ahash_finup_first(struct ahash_request *req)
1524 {
1525 	return ahash_digest(req);
1526 }
1527 
1528 static int ahash_init(struct ahash_request *req)
1529 {
1530 	struct caam_hash_state *state = ahash_request_ctx(req);
1531 
1532 	state->update = ahash_update_first;
1533 	state->finup = ahash_finup_first;
1534 	state->final = ahash_final_no_ctx;
1535 
1536 	state->ctx_dma = 0;
1537 	state->ctx_dma_len = 0;
1538 	state->current_buf = 0;
1539 	state->buf_dma = 0;
1540 	state->buflen_0 = 0;
1541 	state->buflen_1 = 0;
1542 
1543 	return 0;
1544 }
1545 
1546 static int ahash_update(struct ahash_request *req)
1547 {
1548 	struct caam_hash_state *state = ahash_request_ctx(req);
1549 
1550 	return state->update(req);
1551 }
1552 
1553 static int ahash_finup(struct ahash_request *req)
1554 {
1555 	struct caam_hash_state *state = ahash_request_ctx(req);
1556 
1557 	return state->finup(req);
1558 }
1559 
1560 static int ahash_final(struct ahash_request *req)
1561 {
1562 	struct caam_hash_state *state = ahash_request_ctx(req);
1563 
1564 	return state->final(req);
1565 }
1566 
1567 static int ahash_export(struct ahash_request *req, void *out)
1568 {
1569 	struct caam_hash_state *state = ahash_request_ctx(req);
1570 	struct caam_export_state *export = out;
1571 	int len;
1572 	u8 *buf;
1573 
1574 	if (state->current_buf) {
1575 		buf = state->buf_1;
1576 		len = state->buflen_1;
1577 	} else {
1578 		buf = state->buf_0;
1579 		len = state->buflen_0;
1580 	}
1581 
1582 	memcpy(export->buf, buf, len);
1583 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1584 	export->buflen = len;
1585 	export->update = state->update;
1586 	export->final = state->final;
1587 	export->finup = state->finup;
1588 
1589 	return 0;
1590 }
1591 
1592 static int ahash_import(struct ahash_request *req, const void *in)
1593 {
1594 	struct caam_hash_state *state = ahash_request_ctx(req);
1595 	const struct caam_export_state *export = in;
1596 
1597 	memset(state, 0, sizeof(*state));
1598 	memcpy(state->buf_0, export->buf, export->buflen);
1599 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1600 	state->buflen_0 = export->buflen;
1601 	state->update = export->update;
1602 	state->final = export->final;
1603 	state->finup = export->finup;
1604 
1605 	return 0;
1606 }
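/*
 * Caller-side usage sketch (generic crypto API, not part of this driver):
 * to checkpoint and later resume a partial hash, a user does roughly
 *
 *	u8 state[crypto_ahash_statesize(tfm)];
 *
 *	crypto_ahash_update(req);	  (wait if this returns -EINPROGRESS)
 *	crypto_ahash_export(req, state);  (handled by ahash_export() above)
 *	...
 *	crypto_ahash_import(req, state);  (handled by ahash_import() above)
 *	crypto_ahash_finup(req);
 *
 * statesize for these algorithms is sizeof(struct caam_export_state).
 */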
1607 
1608 struct caam_hash_template {
1609 	char name[CRYPTO_MAX_ALG_NAME];
1610 	char driver_name[CRYPTO_MAX_ALG_NAME];
1611 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1612 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1613 	unsigned int blocksize;
1614 	struct ahash_alg template_ahash;
1615 	u32 alg_type;
1616 };
1617 
1618 /* ahash descriptors */
1619 static struct caam_hash_template driver_hash[] = {
1620 	{
1621 		.name = "sha1",
1622 		.driver_name = "sha1-caam",
1623 		.hmac_name = "hmac(sha1)",
1624 		.hmac_driver_name = "hmac-sha1-caam",
1625 		.blocksize = SHA1_BLOCK_SIZE,
1626 		.template_ahash = {
1627 			.init = ahash_init,
1628 			.update = ahash_update,
1629 			.final = ahash_final,
1630 			.finup = ahash_finup,
1631 			.digest = ahash_digest,
1632 			.export = ahash_export,
1633 			.import = ahash_import,
1634 			.setkey = ahash_setkey,
1635 			.halg = {
1636 				.digestsize = SHA1_DIGEST_SIZE,
1637 				.statesize = sizeof(struct caam_export_state),
1638 			},
1639 		},
1640 		.alg_type = OP_ALG_ALGSEL_SHA1,
1641 	}, {
1642 		.name = "sha224",
1643 		.driver_name = "sha224-caam",
1644 		.hmac_name = "hmac(sha224)",
1645 		.hmac_driver_name = "hmac-sha224-caam",
1646 		.blocksize = SHA224_BLOCK_SIZE,
1647 		.template_ahash = {
1648 			.init = ahash_init,
1649 			.update = ahash_update,
1650 			.final = ahash_final,
1651 			.finup = ahash_finup,
1652 			.digest = ahash_digest,
1653 			.export = ahash_export,
1654 			.import = ahash_import,
1655 			.setkey = ahash_setkey,
1656 			.halg = {
1657 				.digestsize = SHA224_DIGEST_SIZE,
1658 				.statesize = sizeof(struct caam_export_state),
1659 			},
1660 		},
1661 		.alg_type = OP_ALG_ALGSEL_SHA224,
1662 	}, {
1663 		.name = "sha256",
1664 		.driver_name = "sha256-caam",
1665 		.hmac_name = "hmac(sha256)",
1666 		.hmac_driver_name = "hmac-sha256-caam",
1667 		.blocksize = SHA256_BLOCK_SIZE,
1668 		.template_ahash = {
1669 			.init = ahash_init,
1670 			.update = ahash_update,
1671 			.final = ahash_final,
1672 			.finup = ahash_finup,
1673 			.digest = ahash_digest,
1674 			.export = ahash_export,
1675 			.import = ahash_import,
1676 			.setkey = ahash_setkey,
1677 			.halg = {
1678 				.digestsize = SHA256_DIGEST_SIZE,
1679 				.statesize = sizeof(struct caam_export_state),
1680 			},
1681 		},
1682 		.alg_type = OP_ALG_ALGSEL_SHA256,
1683 	}, {
1684 		.name = "sha384",
1685 		.driver_name = "sha384-caam",
1686 		.hmac_name = "hmac(sha384)",
1687 		.hmac_driver_name = "hmac-sha384-caam",
1688 		.blocksize = SHA384_BLOCK_SIZE,
1689 		.template_ahash = {
1690 			.init = ahash_init,
1691 			.update = ahash_update,
1692 			.final = ahash_final,
1693 			.finup = ahash_finup,
1694 			.digest = ahash_digest,
1695 			.export = ahash_export,
1696 			.import = ahash_import,
1697 			.setkey = ahash_setkey,
1698 			.halg = {
1699 				.digestsize = SHA384_DIGEST_SIZE,
1700 				.statesize = sizeof(struct caam_export_state),
1701 			},
1702 		},
1703 		.alg_type = OP_ALG_ALGSEL_SHA384,
1704 	}, {
1705 		.name = "sha512",
1706 		.driver_name = "sha512-caam",
1707 		.hmac_name = "hmac(sha512)",
1708 		.hmac_driver_name = "hmac-sha512-caam",
1709 		.blocksize = SHA512_BLOCK_SIZE,
1710 		.template_ahash = {
1711 			.init = ahash_init,
1712 			.update = ahash_update,
1713 			.final = ahash_final,
1714 			.finup = ahash_finup,
1715 			.digest = ahash_digest,
1716 			.export = ahash_export,
1717 			.import = ahash_import,
1718 			.setkey = ahash_setkey,
1719 			.halg = {
1720 				.digestsize = SHA512_DIGEST_SIZE,
1721 				.statesize = sizeof(struct caam_export_state),
1722 			},
1723 		},
1724 		.alg_type = OP_ALG_ALGSEL_SHA512,
1725 	}, {
1726 		.name = "md5",
1727 		.driver_name = "md5-caam",
1728 		.hmac_name = "hmac(md5)",
1729 		.hmac_driver_name = "hmac-md5-caam",
1730 		.blocksize = MD5_BLOCK_WORDS * 4,
1731 		.template_ahash = {
1732 			.init = ahash_init,
1733 			.update = ahash_update,
1734 			.final = ahash_final,
1735 			.finup = ahash_finup,
1736 			.digest = ahash_digest,
1737 			.export = ahash_export,
1738 			.import = ahash_import,
1739 			.setkey = ahash_setkey,
1740 			.halg = {
1741 				.digestsize = MD5_DIGEST_SIZE,
1742 				.statesize = sizeof(struct caam_export_state),
1743 			},
1744 		},
1745 		.alg_type = OP_ALG_ALGSEL_MD5,
1746 	}, {
1747 		.hmac_name = "xcbc(aes)",
1748 		.hmac_driver_name = "xcbc-aes-caam",
1749 		.blocksize = AES_BLOCK_SIZE,
1750 		.template_ahash = {
1751 			.init = ahash_init,
1752 			.update = ahash_update,
1753 			.final = ahash_final,
1754 			.finup = ahash_finup,
1755 			.digest = ahash_digest,
1756 			.export = ahash_export,
1757 			.import = ahash_import,
1758 			.setkey = axcbc_setkey,
1759 			.halg = {
1760 				.digestsize = AES_BLOCK_SIZE,
1761 				.statesize = sizeof(struct caam_export_state),
1762 			},
1763 		},
1764 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1765 	}, {
1766 		.hmac_name = "cmac(aes)",
1767 		.hmac_driver_name = "cmac-aes-caam",
1768 		.blocksize = AES_BLOCK_SIZE,
1769 		.template_ahash = {
1770 			.init = ahash_init,
1771 			.update = ahash_update,
1772 			.final = ahash_final,
1773 			.finup = ahash_finup,
1774 			.digest = ahash_digest,
1775 			.export = ahash_export,
1776 			.import = ahash_import,
1777 			.setkey = acmac_setkey,
1778 			.halg = {
1779 				.digestsize = AES_BLOCK_SIZE,
1780 				.statesize = sizeof(struct caam_export_state),
1781 			},
1782 		},
1783 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1784 	},
1785 };
1786 
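/*
 * Per-algorithm instance handed to the Crypto API: the ahash_alg built from
 * a driver_hash template plus the CAAM algorithm selector used in descriptors.
 */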
1787 struct caam_hash_alg {
1788 	struct list_head entry;
1789 	int alg_type;
1790 	struct ahash_alg ahash_alg;
1791 };
1792 
1793 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1794 {
1795 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1796 	struct crypto_alg *base = tfm->__crt_alg;
1797 	struct hash_alg_common *halg =
1798 		 container_of(base, struct hash_alg_common, base);
1799 	struct ahash_alg *alg =
1800 		 container_of(halg, struct ahash_alg, halg);
1801 	struct caam_hash_alg *caam_hash =
1802 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1803 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1804 	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncated variants of SHA-256 and SHA-512,
	 * so their running digests use the parent algorithm's state size
	 * (32 and 64 bytes), not the truncated output size.
	 */
1805 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1806 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1807 					 HASH_MSG_LEN + 32,
1808 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1809 					 HASH_MSG_LEN + 64,
1810 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1811 	dma_addr_t dma_addr;
1812 	struct caam_drv_private *priv;
1813 
1814 	/*
1815 	 * Get a Job ring from Job Ring driver to ensure in-order
1816 	 * crypto request processing per tfm
1817 	 */
1818 	ctx->jrdev = caam_jr_alloc();
1819 	if (IS_ERR(ctx->jrdev)) {
1820 		pr_err("Job Ring Device allocation for transform failed\n");
1821 		return PTR_ERR(ctx->jrdev);
1822 	}
1823 
1824 	priv = dev_get_drvdata(ctx->jrdev->parent);
1825 
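	/*
	 * Pick DMA direction, descriptor algorithm type and context length
	 * per algorithm class; AES-XCBC-MAC additionally keeps key material
	 * in ctx->key and maps it for DMA here.
	 */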
1826 	if (is_xcbc_aes(caam_hash->alg_type)) {
1827 		ctx->dir = DMA_TO_DEVICE;
1828 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1829 		ctx->ctx_len = 48;
1830 
1831 		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1832 							  ARRAY_SIZE(ctx->key),
1833 							  DMA_BIDIRECTIONAL,
1834 							  DMA_ATTR_SKIP_CPU_SYNC);
1835 		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1836 			dev_err(ctx->jrdev, "unable to map key\n");
1837 			caam_jr_free(ctx->jrdev);
1838 			return -ENOMEM;
1839 		}
1840 	} else if (is_cmac_aes(caam_hash->alg_type)) {
1841 		ctx->dir = DMA_TO_DEVICE;
1842 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1843 		ctx->ctx_len = 32;
1844 	} else {
1845 		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1846 		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1847 		ctx->ctx_len = runninglen[(ctx->adata.algtype &
1848 					   OP_ALG_ALGSEL_SUBMASK) >>
1849 					  OP_ALG_ALGSEL_SHIFT];
1850 	}
1851 
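	/*
	 * All four shared descriptors are covered by a single DMA mapping
	 * (up to the key member); the per-descriptor bus addresses below
	 * are derived from their offsets within struct caam_hash_ctx.
	 */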
1852 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1853 					offsetof(struct caam_hash_ctx, key),
1854 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1855 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1856 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1857 
1858 		if (is_xcbc_aes(caam_hash->alg_type))
1859 			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1860 					       ARRAY_SIZE(ctx->key),
1861 					       DMA_BIDIRECTIONAL,
1862 					       DMA_ATTR_SKIP_CPU_SYNC);
1863 
1864 		caam_jr_free(ctx->jrdev);
1865 		return -ENOMEM;
1866 	}
1867 
1868 	ctx->sh_desc_update_dma = dma_addr;
1869 	ctx->sh_desc_update_first_dma = dma_addr +
1870 					offsetof(struct caam_hash_ctx,
1871 						 sh_desc_update_first);
1872 	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1873 						   sh_desc_fin);
1874 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1875 						      sh_desc_digest);
1876 
1877 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1878 				 sizeof(struct caam_hash_state));
1879 
1880 	/*
1881 	 * For keyed hash algorithms, the shared descriptors are created
1882 	 * later, in the setkey() callback.
1883 	 */
1884 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1885 }
1886 
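/* undo caam_hash_cra_init(): release DMA mappings and the reserved job ring */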
1887 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1888 {
1889 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1890 
1891 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1892 			       offsetof(struct caam_hash_ctx, key),
1893 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1894 	if (is_xcbc_aes(ctx->adata.algtype))
1895 		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1896 				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
1897 				       DMA_ATTR_SKIP_CPU_SYNC);
1898 	caam_jr_free(ctx->jrdev);
1899 }
1900 
1901 void caam_algapi_hash_exit(void)
1902 {
1903 	struct caam_hash_alg *t_alg, *n;
1904 
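	/* hash_list is only initialized if caam_algapi_hash_init() ran */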
1905 	if (!hash_list.next)
1906 		return;
1907 
1908 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1909 		crypto_unregister_ahash(&t_alg->ahash_alg);
1910 		list_del(&t_alg->entry);
1911 		kfree(t_alg);
1912 	}
1913 }
1914 
1915 static struct caam_hash_alg *
1916 caam_hash_alloc(struct caam_hash_template *template,
1917 		bool keyed)
1918 {
1919 	struct caam_hash_alg *t_alg;
1920 	struct ahash_alg *halg;
1921 	struct crypto_alg *alg;
1922 
1923 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1924 	if (!t_alg) {
1925 		pr_err("failed to allocate t_alg\n");
1926 		return ERR_PTR(-ENOMEM);
1927 	}
1928 
1929 	t_alg->ahash_alg = template->template_ahash;
1930 	halg = &t_alg->ahash_alg;
1931 	alg = &halg->halg.base;
1932 
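	/*
	 * Keyed instances register under the (h)mac name and keep setkey();
	 * unkeyed instances use the plain hash name and drop setkey().
	 */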
1933 	if (keyed) {
1934 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1935 			 template->hmac_name);
1936 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1937 			 template->hmac_driver_name);
1938 	} else {
1939 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1940 			 template->name);
1941 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1942 			 template->driver_name);
1943 		t_alg->ahash_alg.setkey = NULL;
1944 	}
1945 	alg->cra_module = THIS_MODULE;
1946 	alg->cra_init = caam_hash_cra_init;
1947 	alg->cra_exit = caam_hash_cra_exit;
1948 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1949 	alg->cra_priority = CAAM_CRA_PRIORITY;
1950 	alg->cra_blocksize = template->blocksize;
1951 	alg->cra_alignmask = 0;
1952 	alg->cra_flags = CRYPTO_ALG_ASYNC;
1953 
1954 	t_alg->alg_type = template->alg_type;
1955 
1956 	return t_alg;
1957 }
1958 
1959 int caam_algapi_hash_init(struct device *ctrldev)
1960 {
1961 	int i = 0, err = 0;
1962 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1963 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1964 	u32 md_inst, md_vid;
1965 
1966 	/*
1967 	 * Register crypto algorithms the device supports.  First, identify
1968 	 * presence and attributes of MD block.
1969 	 */
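	/*
	 * Era < 10 parts report CHA properties through the perfmon block;
	 * Era >= 10 parts expose per-CHA version registers instead.
	 */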
1970 	if (priv->era < 10) {
1971 		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1972 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1973 		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1974 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1975 	} else {
1976 		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1977 
1978 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1979 		md_inst = mdha & CHA_VER_NUM_MASK;
1980 	}
1981 
1982 	/*
1983 	 * Skip registration of any hashing algorithms if MD block
1984 	 * is not present.
1985 	 */
1986 	if (!md_inst)
1987 		return -ENODEV;
1988 
1989 	/* Low-power MDHA (LP256) supports digests only up to SHA-256 */
1990 	if (md_vid == CHA_VER_VID_MD_LP256)
1991 		md_limit = SHA256_DIGEST_SIZE;
1992 
1993 	INIT_LIST_HEAD(&hash_list);
1994 
1995 	/* register crypto algorithms the device supports */
1996 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1997 		struct caam_hash_alg *t_alg;
1998 		struct caam_hash_template *alg = driver_hash + i;
1999 
2000 		/* If MD size is not supported by device, skip registration */
2001 		if (is_mdha(alg->alg_type) &&
2002 		    alg->template_ahash.halg.digestsize > md_limit)
2003 			continue;
2004 
2005 		/* register hmac version */
2006 		t_alg = caam_hash_alloc(alg, true);
2007 		if (IS_ERR(t_alg)) {
2008 			err = PTR_ERR(t_alg);
2009 			pr_warn("%s alg allocation failed\n",
2010 				alg->hmac_driver_name);
2011 			continue;
2012 		}
2013 
2014 		err = crypto_register_ahash(&t_alg->ahash_alg);
2015 		if (err) {
2016 			pr_warn("%s alg registration failed: %d\n",
2017 				t_alg->ahash_alg.halg.base.cra_driver_name,
2018 				err);
2019 			kfree(t_alg);
2020 		} else {
2021 			list_add_tail(&t_alg->entry, &hash_list);
		}
2022 
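		/* AES-based MACs (xcbc, cmac) have no unkeyed variant */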
2023 		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2024 			continue;
2025 
2026 		/* register unkeyed version */
2027 		t_alg = caam_hash_alloc(alg, false);
2028 		if (IS_ERR(t_alg)) {
2029 			err = PTR_ERR(t_alg);
2030 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2031 			continue;
2032 		}
2033 
2034 		err = crypto_register_ahash(&t_alg->ahash_alg);
2035 		if (err) {
2036 			pr_warn("%s alg registration failed: %d\n",
2037 				t_alg->ahash_alg.halg.base.cra_driver_name,
2038 				err);
2039 			kfree(t_alg);
2040 		} else {
2041 			list_add_tail(&t_alg->entry, &hash_list);
		}
2042 	}
2043 
2044 	return err;
2045 }
2046