xref: /openbmc/linux/drivers/crypto/ccree/cc_hash.c (revision 5ff32883)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <crypto/algapi.h>
7 #include <crypto/hash.h>
8 #include <crypto/md5.h>
9 #include <crypto/sm3.h>
10 #include <crypto/internal/hash.h>
11 
12 #include "cc_driver.h"
13 #include "cc_request_mgr.h"
14 #include "cc_buffer_mgr.h"
15 #include "cc_hash.h"
16 #include "cc_sram_mgr.h"
17 
18 #define CC_MAX_HASH_SEQ_LEN 12
19 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
20 #define CC_SM3_HASH_LEN_SIZE 8
21 
22 struct cc_hash_handle {
23 	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
24 	cc_sram_addr_t larval_digest_sram_addr;   /* const value in SRAM */
25 	struct list_head hash_list;
26 };
27 
28 static const u32 digest_len_init[] = {
29 	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
30 static const u32 md5_init[] = {
31 	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32 static const u32 sha1_init[] = {
33 	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
34 static const u32 sha224_init[] = {
35 	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
36 	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
37 static const u32 sha256_init[] = {
38 	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
39 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
40 static const u32 digest_len_sha512_init[] = {
41 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
42 static u64 sha384_init[] = {
43 	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
44 	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
45 static u64 sha512_init[] = {
46 	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
47 	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
48 static const u32 sm3_init[] = {
49 	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
50 	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
51 
52 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
53 			  unsigned int *seq_size);
54 
55 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
56 			  unsigned int *seq_size);
57 
58 static const void *cc_larval_digest(struct device *dev, u32 mode);
59 
60 struct cc_hash_alg {
61 	struct list_head entry;
62 	int hash_mode;
63 	int hw_mode;
64 	int inter_digestsize;
65 	struct cc_drvdata *drvdata;
66 	struct ahash_alg ahash_alg;
67 };
68 
69 struct hash_key_req_ctx {
70 	u32 keylen;
71 	dma_addr_t key_dma_addr;
72 };
73 
74 /* hash per-session context */
75 struct cc_hash_ctx {
76 	struct cc_drvdata *drvdata;
77 	/* holds the origin digest; the digest after "setkey" if HMAC,*
78 	 * the initial digest if HASH.
79 	 */
80 	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
81 	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
82 
83 	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
84 	dma_addr_t digest_buff_dma_addr;
85 	/* use for hmac with key large then mode block size */
86 	struct hash_key_req_ctx key_params;
87 	int hash_mode;
88 	int hw_mode;
89 	int inter_digestsize;
90 	unsigned int hash_len;
91 	struct completion setkey_comp;
92 	bool is_hmac;
93 };
94 
95 static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
96 			unsigned int flow_mode, struct cc_hw_desc desc[],
97 			bool is_not_last_data, unsigned int *seq_size);
98 
99 static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
100 {
101 	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
102 	    mode == DRV_HASH_SHA512) {
103 		set_bytes_swap(desc, 1);
104 	} else {
105 		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
106 	}
107 }
108 
109 static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
110 			 unsigned int digestsize)
111 {
112 	state->digest_result_dma_addr =
113 		dma_map_single(dev, state->digest_result_buff,
114 			       digestsize, DMA_BIDIRECTIONAL);
115 	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
116 		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
117 			digestsize);
118 		return -ENOMEM;
119 	}
120 	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
121 		digestsize, state->digest_result_buff,
122 		&state->digest_result_dma_addr);
123 
124 	return 0;
125 }
126 
127 static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
128 			struct cc_hash_ctx *ctx)
129 {
130 	bool is_hmac = ctx->is_hmac;
131 
132 	memset(state, 0, sizeof(*state));
133 
134 	if (is_hmac) {
135 		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
136 		    ctx->hw_mode != DRV_CIPHER_CMAC) {
137 			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
138 						ctx->inter_digestsize,
139 						DMA_BIDIRECTIONAL);
140 
141 			memcpy(state->digest_buff, ctx->digest_buff,
142 			       ctx->inter_digestsize);
143 			if (ctx->hash_mode == DRV_HASH_SHA512 ||
144 			    ctx->hash_mode == DRV_HASH_SHA384)
145 				memcpy(state->digest_bytes_len,
146 				       digest_len_sha512_init,
147 				       ctx->hash_len);
148 			else
149 				memcpy(state->digest_bytes_len, digest_len_init,
150 				       ctx->hash_len);
151 		}
152 
153 		if (ctx->hash_mode != DRV_HASH_NULL) {
154 			dma_sync_single_for_cpu(dev,
155 						ctx->opad_tmp_keys_dma_addr,
156 						ctx->inter_digestsize,
157 						DMA_BIDIRECTIONAL);
158 			memcpy(state->opad_digest_buff,
159 			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
160 		}
161 	} else { /*hash*/
162 		/* Copy the initial digests if hash flow. */
163 		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
164 
165 		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
166 	}
167 }
168 
169 static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
170 		      struct cc_hash_ctx *ctx)
171 {
172 	bool is_hmac = ctx->is_hmac;
173 
174 	state->digest_buff_dma_addr =
175 		dma_map_single(dev, state->digest_buff,
176 			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
177 	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
178 		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
179 			ctx->inter_digestsize, state->digest_buff);
180 		return -EINVAL;
181 	}
182 	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
183 		ctx->inter_digestsize, state->digest_buff,
184 		&state->digest_buff_dma_addr);
185 
186 	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
187 		state->digest_bytes_len_dma_addr =
188 			dma_map_single(dev, state->digest_bytes_len,
189 				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
190 		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
191 			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
192 				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
193 			goto unmap_digest_buf;
194 		}
195 		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
196 			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
197 			&state->digest_bytes_len_dma_addr);
198 	}
199 
200 	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
201 		state->opad_digest_dma_addr =
202 			dma_map_single(dev, state->opad_digest_buff,
203 				       ctx->inter_digestsize,
204 				       DMA_BIDIRECTIONAL);
205 		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
206 			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
207 				ctx->inter_digestsize,
208 				state->opad_digest_buff);
209 			goto unmap_digest_len;
210 		}
211 		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
212 			ctx->inter_digestsize, state->opad_digest_buff,
213 			&state->opad_digest_dma_addr);
214 	}
215 
216 	return 0;
217 
218 unmap_digest_len:
219 	if (state->digest_bytes_len_dma_addr) {
220 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
221 				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
222 		state->digest_bytes_len_dma_addr = 0;
223 	}
224 unmap_digest_buf:
225 	if (state->digest_buff_dma_addr) {
226 		dma_unmap_single(dev, state->digest_buff_dma_addr,
227 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
228 		state->digest_buff_dma_addr = 0;
229 	}
230 
231 	return -EINVAL;
232 }
233 
234 static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
235 			 struct cc_hash_ctx *ctx)
236 {
237 	if (state->digest_buff_dma_addr) {
238 		dma_unmap_single(dev, state->digest_buff_dma_addr,
239 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
240 		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
241 			&state->digest_buff_dma_addr);
242 		state->digest_buff_dma_addr = 0;
243 	}
244 	if (state->digest_bytes_len_dma_addr) {
245 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
246 				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
247 		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
248 			&state->digest_bytes_len_dma_addr);
249 		state->digest_bytes_len_dma_addr = 0;
250 	}
251 	if (state->opad_digest_dma_addr) {
252 		dma_unmap_single(dev, state->opad_digest_dma_addr,
253 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
254 		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
255 			&state->opad_digest_dma_addr);
256 		state->opad_digest_dma_addr = 0;
257 	}
258 }
259 
260 static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
261 			    unsigned int digestsize, u8 *result)
262 {
263 	if (state->digest_result_dma_addr) {
264 		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
265 				 DMA_BIDIRECTIONAL);
266 		dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
267 			state->digest_result_buff,
268 			&state->digest_result_dma_addr, digestsize);
269 		memcpy(result, state->digest_result_buff, digestsize);
270 	}
271 	state->digest_result_dma_addr = 0;
272 }
273 
274 static void cc_update_complete(struct device *dev, void *cc_req, int err)
275 {
276 	struct ahash_request *req = (struct ahash_request *)cc_req;
277 	struct ahash_req_ctx *state = ahash_request_ctx(req);
278 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
279 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
280 
281 	dev_dbg(dev, "req=%pK\n", req);
282 
283 	cc_unmap_hash_request(dev, state, req->src, false);
284 	cc_unmap_req(dev, state, ctx);
285 	req->base.complete(&req->base, err);
286 }
287 
288 static void cc_digest_complete(struct device *dev, void *cc_req, int err)
289 {
290 	struct ahash_request *req = (struct ahash_request *)cc_req;
291 	struct ahash_req_ctx *state = ahash_request_ctx(req);
292 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
293 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
294 	u32 digestsize = crypto_ahash_digestsize(tfm);
295 
296 	dev_dbg(dev, "req=%pK\n", req);
297 
298 	cc_unmap_hash_request(dev, state, req->src, false);
299 	cc_unmap_result(dev, state, digestsize, req->result);
300 	cc_unmap_req(dev, state, ctx);
301 	req->base.complete(&req->base, err);
302 }
303 
304 static void cc_hash_complete(struct device *dev, void *cc_req, int err)
305 {
306 	struct ahash_request *req = (struct ahash_request *)cc_req;
307 	struct ahash_req_ctx *state = ahash_request_ctx(req);
308 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
309 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
310 	u32 digestsize = crypto_ahash_digestsize(tfm);
311 
312 	dev_dbg(dev, "req=%pK\n", req);
313 
314 	cc_unmap_hash_request(dev, state, req->src, false);
315 	cc_unmap_result(dev, state, digestsize, req->result);
316 	cc_unmap_req(dev, state, ctx);
317 	req->base.complete(&req->base, err);
318 }
319 
320 static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
321 			 int idx)
322 {
323 	struct ahash_req_ctx *state = ahash_request_ctx(req);
324 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
325 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
326 	u32 digestsize = crypto_ahash_digestsize(tfm);
327 
328 	/* Get final MAC result */
329 	hw_desc_init(&desc[idx]);
330 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
331 	/* TODO */
332 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
333 		      NS_BIT, 1);
334 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
335 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
336 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
337 	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
338 	cc_set_endianity(ctx->hash_mode, &desc[idx]);
339 	idx++;
340 
341 	return idx;
342 }
343 
344 static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
345 		       int idx)
346 {
347 	struct ahash_req_ctx *state = ahash_request_ctx(req);
348 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
349 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
350 	u32 digestsize = crypto_ahash_digestsize(tfm);
351 
352 	/* store the hash digest result in the context */
353 	hw_desc_init(&desc[idx]);
354 	set_cipher_mode(&desc[idx], ctx->hw_mode);
355 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
356 		      NS_BIT, 0);
357 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
358 	cc_set_endianity(ctx->hash_mode, &desc[idx]);
359 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
360 	idx++;
361 
362 	/* Loading hash opad xor key state */
363 	hw_desc_init(&desc[idx]);
364 	set_cipher_mode(&desc[idx], ctx->hw_mode);
365 	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
366 		     ctx->inter_digestsize, NS_BIT);
367 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
368 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
369 	idx++;
370 
371 	/* Load the hash current length */
372 	hw_desc_init(&desc[idx]);
373 	set_cipher_mode(&desc[idx], ctx->hw_mode);
374 	set_din_sram(&desc[idx],
375 		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
376 		     ctx->hash_len);
377 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
378 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
379 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
380 	idx++;
381 
382 	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
383 	hw_desc_init(&desc[idx]);
384 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
385 	set_dout_no_dma(&desc[idx], 0, 0, 1);
386 	idx++;
387 
388 	/* Perform HASH update */
389 	hw_desc_init(&desc[idx]);
390 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
391 		     digestsize, NS_BIT);
392 	set_flow_mode(&desc[idx], DIN_HASH);
393 	idx++;
394 
395 	return idx;
396 }
397 
398 static int cc_hash_digest(struct ahash_request *req)
399 {
400 	struct ahash_req_ctx *state = ahash_request_ctx(req);
401 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
402 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
403 	u32 digestsize = crypto_ahash_digestsize(tfm);
404 	struct scatterlist *src = req->src;
405 	unsigned int nbytes = req->nbytes;
406 	u8 *result = req->result;
407 	struct device *dev = drvdata_to_dev(ctx->drvdata);
408 	bool is_hmac = ctx->is_hmac;
409 	struct cc_crypto_req cc_req = {};
410 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
411 	cc_sram_addr_t larval_digest_addr =
412 		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
413 	int idx = 0;
414 	int rc = 0;
415 	gfp_t flags = cc_gfp_flags(&req->base);
416 
417 	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
418 		nbytes);
419 
420 	cc_init_req(dev, state, ctx);
421 
422 	if (cc_map_req(dev, state, ctx)) {
423 		dev_err(dev, "map_ahash_source() failed\n");
424 		return -ENOMEM;
425 	}
426 
427 	if (cc_map_result(dev, state, digestsize)) {
428 		dev_err(dev, "map_ahash_digest() failed\n");
429 		cc_unmap_req(dev, state, ctx);
430 		return -ENOMEM;
431 	}
432 
433 	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
434 				      flags)) {
435 		dev_err(dev, "map_ahash_request_final() failed\n");
436 		cc_unmap_result(dev, state, digestsize, result);
437 		cc_unmap_req(dev, state, ctx);
438 		return -ENOMEM;
439 	}
440 
441 	/* Setup request structure */
442 	cc_req.user_cb = cc_digest_complete;
443 	cc_req.user_arg = req;
444 
445 	/* If HMAC then load hash IPAD xor key, if HASH then load initial
446 	 * digest
447 	 */
448 	hw_desc_init(&desc[idx]);
449 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
450 	if (is_hmac) {
451 		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
452 			     ctx->inter_digestsize, NS_BIT);
453 	} else {
454 		set_din_sram(&desc[idx], larval_digest_addr,
455 			     ctx->inter_digestsize);
456 	}
457 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
458 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
459 	idx++;
460 
461 	/* Load the hash current length */
462 	hw_desc_init(&desc[idx]);
463 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
464 
465 	if (is_hmac) {
466 		set_din_type(&desc[idx], DMA_DLLI,
467 			     state->digest_bytes_len_dma_addr,
468 			     ctx->hash_len, NS_BIT);
469 	} else {
470 		set_din_const(&desc[idx], 0, ctx->hash_len);
471 		if (nbytes)
472 			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
473 		else
474 			set_cipher_do(&desc[idx], DO_PAD);
475 	}
476 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
477 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
478 	idx++;
479 
480 	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
481 
482 	if (is_hmac) {
483 		/* HW last hash block padding (aka. "DO_PAD") */
484 		hw_desc_init(&desc[idx]);
485 		set_cipher_mode(&desc[idx], ctx->hw_mode);
486 		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
487 			      ctx->hash_len, NS_BIT, 0);
488 		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
489 		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
490 		set_cipher_do(&desc[idx], DO_PAD);
491 		idx++;
492 
493 		idx = cc_fin_hmac(desc, req, idx);
494 	}
495 
496 	idx = cc_fin_result(desc, req, idx);
497 
498 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
499 	if (rc != -EINPROGRESS && rc != -EBUSY) {
500 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
501 		cc_unmap_hash_request(dev, state, src, true);
502 		cc_unmap_result(dev, state, digestsize, result);
503 		cc_unmap_req(dev, state, ctx);
504 	}
505 	return rc;
506 }
507 
508 static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
509 			   struct ahash_req_ctx *state, unsigned int idx)
510 {
511 	/* Restore hash digest */
512 	hw_desc_init(&desc[idx]);
513 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
514 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
515 		     ctx->inter_digestsize, NS_BIT);
516 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
517 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
518 	idx++;
519 
520 	/* Restore hash current length */
521 	hw_desc_init(&desc[idx]);
522 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
523 	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
524 	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
525 		     ctx->hash_len, NS_BIT);
526 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
527 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
528 	idx++;
529 
530 	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
531 
532 	return idx;
533 }
534 
535 static int cc_hash_update(struct ahash_request *req)
536 {
537 	struct ahash_req_ctx *state = ahash_request_ctx(req);
538 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
539 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
540 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
541 	struct scatterlist *src = req->src;
542 	unsigned int nbytes = req->nbytes;
543 	struct device *dev = drvdata_to_dev(ctx->drvdata);
544 	struct cc_crypto_req cc_req = {};
545 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
546 	u32 idx = 0;
547 	int rc;
548 	gfp_t flags = cc_gfp_flags(&req->base);
549 
550 	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
551 		"hmac" : "hash", nbytes);
552 
553 	if (nbytes == 0) {
554 		/* no real updates required */
555 		return 0;
556 	}
557 
558 	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
559 					block_size, flags);
560 	if (rc) {
561 		if (rc == 1) {
562 			dev_dbg(dev, " data size not require HW update %x\n",
563 				nbytes);
564 			/* No hardware updates are required */
565 			return 0;
566 		}
567 		dev_err(dev, "map_ahash_request_update() failed\n");
568 		return -ENOMEM;
569 	}
570 
571 	if (cc_map_req(dev, state, ctx)) {
572 		dev_err(dev, "map_ahash_source() failed\n");
573 		cc_unmap_hash_request(dev, state, src, true);
574 		return -EINVAL;
575 	}
576 
577 	/* Setup request structure */
578 	cc_req.user_cb = cc_update_complete;
579 	cc_req.user_arg = req;
580 
581 	idx = cc_restore_hash(desc, ctx, state, idx);
582 
583 	/* store the hash digest result in context */
584 	hw_desc_init(&desc[idx]);
585 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
586 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
587 		      ctx->inter_digestsize, NS_BIT, 0);
588 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
589 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
590 	idx++;
591 
592 	/* store current hash length in context */
593 	hw_desc_init(&desc[idx]);
594 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
595 	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
596 		      ctx->hash_len, NS_BIT, 1);
597 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
598 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
599 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
600 	idx++;
601 
602 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
603 	if (rc != -EINPROGRESS && rc != -EBUSY) {
604 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
605 		cc_unmap_hash_request(dev, state, src, true);
606 		cc_unmap_req(dev, state, ctx);
607 	}
608 	return rc;
609 }
610 
611 static int cc_do_finup(struct ahash_request *req, bool update)
612 {
613 	struct ahash_req_ctx *state = ahash_request_ctx(req);
614 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
615 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
616 	u32 digestsize = crypto_ahash_digestsize(tfm);
617 	struct scatterlist *src = req->src;
618 	unsigned int nbytes = req->nbytes;
619 	u8 *result = req->result;
620 	struct device *dev = drvdata_to_dev(ctx->drvdata);
621 	bool is_hmac = ctx->is_hmac;
622 	struct cc_crypto_req cc_req = {};
623 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
624 	unsigned int idx = 0;
625 	int rc;
626 	gfp_t flags = cc_gfp_flags(&req->base);
627 
628 	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
629 		update ? "finup" : "final", nbytes);
630 
631 	if (cc_map_req(dev, state, ctx)) {
632 		dev_err(dev, "map_ahash_source() failed\n");
633 		return -EINVAL;
634 	}
635 
636 	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
637 				      flags)) {
638 		dev_err(dev, "map_ahash_request_final() failed\n");
639 		cc_unmap_req(dev, state, ctx);
640 		return -ENOMEM;
641 	}
642 	if (cc_map_result(dev, state, digestsize)) {
643 		dev_err(dev, "map_ahash_digest() failed\n");
644 		cc_unmap_hash_request(dev, state, src, true);
645 		cc_unmap_req(dev, state, ctx);
646 		return -ENOMEM;
647 	}
648 
649 	/* Setup request structure */
650 	cc_req.user_cb = cc_hash_complete;
651 	cc_req.user_arg = req;
652 
653 	idx = cc_restore_hash(desc, ctx, state, idx);
654 
655 	/* Pad the hash */
656 	hw_desc_init(&desc[idx]);
657 	set_cipher_do(&desc[idx], DO_PAD);
658 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
659 	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
660 		      ctx->hash_len, NS_BIT, 0);
661 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
662 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
663 	idx++;
664 
665 	if (is_hmac)
666 		idx = cc_fin_hmac(desc, req, idx);
667 
668 	idx = cc_fin_result(desc, req, idx);
669 
670 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
671 	if (rc != -EINPROGRESS && rc != -EBUSY) {
672 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
673 		cc_unmap_hash_request(dev, state, src, true);
674 		cc_unmap_result(dev, state, digestsize, result);
675 		cc_unmap_req(dev, state, ctx);
676 	}
677 	return rc;
678 }
679 
680 static int cc_hash_finup(struct ahash_request *req)
681 {
682 	return cc_do_finup(req, true);
683 }
684 
685 
686 static int cc_hash_final(struct ahash_request *req)
687 {
688 	return cc_do_finup(req, false);
689 }
690 
691 static int cc_hash_init(struct ahash_request *req)
692 {
693 	struct ahash_req_ctx *state = ahash_request_ctx(req);
694 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
695 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
696 	struct device *dev = drvdata_to_dev(ctx->drvdata);
697 
698 	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
699 
700 	cc_init_req(dev, state, ctx);
701 
702 	return 0;
703 }
704 
705 static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
706 			  unsigned int keylen)
707 {
708 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
709 	struct cc_crypto_req cc_req = {};
710 	struct cc_hash_ctx *ctx = NULL;
711 	int blocksize = 0;
712 	int digestsize = 0;
713 	int i, idx = 0, rc = 0;
714 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
715 	cc_sram_addr_t larval_addr;
716 	struct device *dev;
717 
718 	ctx = crypto_ahash_ctx(ahash);
719 	dev = drvdata_to_dev(ctx->drvdata);
720 	dev_dbg(dev, "start keylen: %d", keylen);
721 
722 	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
723 	digestsize = crypto_ahash_digestsize(ahash);
724 
725 	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
726 
727 	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
728 	 * any NON-ZERO value utilizes HMAC flow
729 	 */
730 	ctx->key_params.keylen = keylen;
731 	ctx->key_params.key_dma_addr = 0;
732 	ctx->is_hmac = true;
733 
734 	if (keylen) {
735 		ctx->key_params.key_dma_addr =
736 			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
737 		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
738 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
739 				key, keylen);
740 			return -ENOMEM;
741 		}
742 		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
743 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
744 
745 		if (keylen > blocksize) {
746 			/* Load hash initial state */
747 			hw_desc_init(&desc[idx]);
748 			set_cipher_mode(&desc[idx], ctx->hw_mode);
749 			set_din_sram(&desc[idx], larval_addr,
750 				     ctx->inter_digestsize);
751 			set_flow_mode(&desc[idx], S_DIN_to_HASH);
752 			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
753 			idx++;
754 
755 			/* Load the hash current length*/
756 			hw_desc_init(&desc[idx]);
757 			set_cipher_mode(&desc[idx], ctx->hw_mode);
758 			set_din_const(&desc[idx], 0, ctx->hash_len);
759 			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
760 			set_flow_mode(&desc[idx], S_DIN_to_HASH);
761 			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
762 			idx++;
763 
764 			hw_desc_init(&desc[idx]);
765 			set_din_type(&desc[idx], DMA_DLLI,
766 				     ctx->key_params.key_dma_addr, keylen,
767 				     NS_BIT);
768 			set_flow_mode(&desc[idx], DIN_HASH);
769 			idx++;
770 
771 			/* Get hashed key */
772 			hw_desc_init(&desc[idx]);
773 			set_cipher_mode(&desc[idx], ctx->hw_mode);
774 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
775 				      digestsize, NS_BIT, 0);
776 			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
777 			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
778 			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
779 			cc_set_endianity(ctx->hash_mode, &desc[idx]);
780 			idx++;
781 
782 			hw_desc_init(&desc[idx]);
783 			set_din_const(&desc[idx], 0, (blocksize - digestsize));
784 			set_flow_mode(&desc[idx], BYPASS);
785 			set_dout_dlli(&desc[idx],
786 				      (ctx->opad_tmp_keys_dma_addr +
787 				       digestsize),
788 				      (blocksize - digestsize), NS_BIT, 0);
789 			idx++;
790 		} else {
791 			hw_desc_init(&desc[idx]);
792 			set_din_type(&desc[idx], DMA_DLLI,
793 				     ctx->key_params.key_dma_addr, keylen,
794 				     NS_BIT);
795 			set_flow_mode(&desc[idx], BYPASS);
796 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
797 				      keylen, NS_BIT, 0);
798 			idx++;
799 
800 			if ((blocksize - keylen)) {
801 				hw_desc_init(&desc[idx]);
802 				set_din_const(&desc[idx], 0,
803 					      (blocksize - keylen));
804 				set_flow_mode(&desc[idx], BYPASS);
805 				set_dout_dlli(&desc[idx],
806 					      (ctx->opad_tmp_keys_dma_addr +
807 					       keylen), (blocksize - keylen),
808 					      NS_BIT, 0);
809 				idx++;
810 			}
811 		}
812 	} else {
813 		hw_desc_init(&desc[idx]);
814 		set_din_const(&desc[idx], 0, blocksize);
815 		set_flow_mode(&desc[idx], BYPASS);
816 		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
817 			      blocksize, NS_BIT, 0);
818 		idx++;
819 	}
820 
821 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
822 	if (rc) {
823 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
824 		goto out;
825 	}
826 
827 	/* calc derived HMAC key */
828 	for (idx = 0, i = 0; i < 2; i++) {
829 		/* Load hash initial state */
830 		hw_desc_init(&desc[idx]);
831 		set_cipher_mode(&desc[idx], ctx->hw_mode);
832 		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
833 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
834 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
835 		idx++;
836 
837 		/* Load the hash current length*/
838 		hw_desc_init(&desc[idx]);
839 		set_cipher_mode(&desc[idx], ctx->hw_mode);
840 		set_din_const(&desc[idx], 0, ctx->hash_len);
841 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
842 		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
843 		idx++;
844 
845 		/* Prepare ipad key */
846 		hw_desc_init(&desc[idx]);
847 		set_xor_val(&desc[idx], hmac_pad_const[i]);
848 		set_cipher_mode(&desc[idx], ctx->hw_mode);
849 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
850 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
851 		idx++;
852 
853 		/* Perform HASH update */
854 		hw_desc_init(&desc[idx]);
855 		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
856 			     blocksize, NS_BIT);
857 		set_cipher_mode(&desc[idx], ctx->hw_mode);
858 		set_xor_active(&desc[idx]);
859 		set_flow_mode(&desc[idx], DIN_HASH);
860 		idx++;
861 
862 		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
863 		 * of the first HASH "update" state)
864 		 */
865 		hw_desc_init(&desc[idx]);
866 		set_cipher_mode(&desc[idx], ctx->hw_mode);
867 		if (i > 0) /* Not first iteration */
868 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
869 				      ctx->inter_digestsize, NS_BIT, 0);
870 		else /* First iteration */
871 			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
872 				      ctx->inter_digestsize, NS_BIT, 0);
873 		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
874 		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
875 		idx++;
876 	}
877 
878 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
879 
880 out:
881 	if (rc)
882 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
883 
884 	if (ctx->key_params.key_dma_addr) {
885 		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
886 				 ctx->key_params.keylen, DMA_TO_DEVICE);
887 		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
888 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
889 	}
890 	return rc;
891 }
892 
893 static int cc_xcbc_setkey(struct crypto_ahash *ahash,
894 			  const u8 *key, unsigned int keylen)
895 {
896 	struct cc_crypto_req cc_req = {};
897 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
898 	struct device *dev = drvdata_to_dev(ctx->drvdata);
899 	int rc = 0;
900 	unsigned int idx = 0;
901 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
902 
903 	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
904 
905 	switch (keylen) {
906 	case AES_KEYSIZE_128:
907 	case AES_KEYSIZE_192:
908 	case AES_KEYSIZE_256:
909 		break;
910 	default:
911 		return -EINVAL;
912 	}
913 
914 	ctx->key_params.keylen = keylen;
915 
916 	ctx->key_params.key_dma_addr =
917 		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
918 	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
919 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
920 			key, keylen);
921 		return -ENOMEM;
922 	}
923 	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
924 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
925 
926 	ctx->is_hmac = true;
927 	/* 1. Load the AES key */
928 	hw_desc_init(&desc[idx]);
929 	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
930 		     keylen, NS_BIT);
931 	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
932 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
933 	set_key_size_aes(&desc[idx], keylen);
934 	set_flow_mode(&desc[idx], S_DIN_to_AES);
935 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
936 	idx++;
937 
938 	hw_desc_init(&desc[idx]);
939 	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
940 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
941 	set_dout_dlli(&desc[idx],
942 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
943 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
944 	idx++;
945 
946 	hw_desc_init(&desc[idx]);
947 	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
948 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
949 	set_dout_dlli(&desc[idx],
950 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
951 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
952 	idx++;
953 
954 	hw_desc_init(&desc[idx]);
955 	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
956 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
957 	set_dout_dlli(&desc[idx],
958 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
959 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
960 	idx++;
961 
962 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
963 
964 	if (rc)
965 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
966 
967 	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
968 			 ctx->key_params.keylen, DMA_TO_DEVICE);
969 	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
970 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
971 
972 	return rc;
973 }
974 
975 static int cc_cmac_setkey(struct crypto_ahash *ahash,
976 			  const u8 *key, unsigned int keylen)
977 {
978 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
979 	struct device *dev = drvdata_to_dev(ctx->drvdata);
980 
981 	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
982 
983 	ctx->is_hmac = true;
984 
985 	switch (keylen) {
986 	case AES_KEYSIZE_128:
987 	case AES_KEYSIZE_192:
988 	case AES_KEYSIZE_256:
989 		break;
990 	default:
991 		return -EINVAL;
992 	}
993 
994 	ctx->key_params.keylen = keylen;
995 
996 	/* STAT_PHASE_1: Copy key to ctx */
997 
998 	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
999 				keylen, DMA_TO_DEVICE);
1000 
1001 	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1002 	if (keylen == 24) {
1003 		memset(ctx->opad_tmp_keys_buff + 24, 0,
1004 		       CC_AES_KEY_SIZE_MAX - 24);
1005 	}
1006 
1007 	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1008 				   keylen, DMA_TO_DEVICE);
1009 
1010 	ctx->key_params.keylen = keylen;
1011 
1012 	return 0;
1013 }
1014 
1015 static void cc_free_ctx(struct cc_hash_ctx *ctx)
1016 {
1017 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1018 
1019 	if (ctx->digest_buff_dma_addr) {
1020 		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1021 				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1022 		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1023 			&ctx->digest_buff_dma_addr);
1024 		ctx->digest_buff_dma_addr = 0;
1025 	}
1026 	if (ctx->opad_tmp_keys_dma_addr) {
1027 		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1028 				 sizeof(ctx->opad_tmp_keys_buff),
1029 				 DMA_BIDIRECTIONAL);
1030 		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1031 			&ctx->opad_tmp_keys_dma_addr);
1032 		ctx->opad_tmp_keys_dma_addr = 0;
1033 	}
1034 
1035 	ctx->key_params.keylen = 0;
1036 }
1037 
1038 static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1039 {
1040 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1041 
1042 	ctx->key_params.keylen = 0;
1043 
1044 	ctx->digest_buff_dma_addr =
1045 		dma_map_single(dev, (void *)ctx->digest_buff,
1046 			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1047 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1048 		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1049 			sizeof(ctx->digest_buff), ctx->digest_buff);
1050 		goto fail;
1051 	}
1052 	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1053 		sizeof(ctx->digest_buff), ctx->digest_buff,
1054 		&ctx->digest_buff_dma_addr);
1055 
1056 	ctx->opad_tmp_keys_dma_addr =
1057 		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1058 			       sizeof(ctx->opad_tmp_keys_buff),
1059 			       DMA_BIDIRECTIONAL);
1060 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1061 		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1062 			sizeof(ctx->opad_tmp_keys_buff),
1063 			ctx->opad_tmp_keys_buff);
1064 		goto fail;
1065 	}
1066 	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1067 		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1068 		&ctx->opad_tmp_keys_dma_addr);
1069 
1070 	ctx->is_hmac = false;
1071 	return 0;
1072 
1073 fail:
1074 	cc_free_ctx(ctx);
1075 	return -ENOMEM;
1076 }
1077 
1078 static int cc_get_hash_len(struct crypto_tfm *tfm)
1079 {
1080 	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1081 
1082 	if (ctx->hash_mode == DRV_HASH_SM3)
1083 		return CC_SM3_HASH_LEN_SIZE;
1084 	else
1085 		return cc_get_default_hash_len(ctx->drvdata);
1086 }
1087 
1088 static int cc_cra_init(struct crypto_tfm *tfm)
1089 {
1090 	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1091 	struct hash_alg_common *hash_alg_common =
1092 		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1093 	struct ahash_alg *ahash_alg =
1094 		container_of(hash_alg_common, struct ahash_alg, halg);
1095 	struct cc_hash_alg *cc_alg =
1096 			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1097 
1098 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1099 				 sizeof(struct ahash_req_ctx));
1100 
1101 	ctx->hash_mode = cc_alg->hash_mode;
1102 	ctx->hw_mode = cc_alg->hw_mode;
1103 	ctx->inter_digestsize = cc_alg->inter_digestsize;
1104 	ctx->drvdata = cc_alg->drvdata;
1105 	ctx->hash_len = cc_get_hash_len(tfm);
1106 	return cc_alloc_ctx(ctx);
1107 }
1108 
1109 static void cc_cra_exit(struct crypto_tfm *tfm)
1110 {
1111 	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1112 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1113 
1114 	dev_dbg(dev, "cc_cra_exit");
1115 	cc_free_ctx(ctx);
1116 }
1117 
1118 static int cc_mac_update(struct ahash_request *req)
1119 {
1120 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1121 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1122 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1123 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1124 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1125 	struct cc_crypto_req cc_req = {};
1126 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1127 	int rc;
1128 	u32 idx = 0;
1129 	gfp_t flags = cc_gfp_flags(&req->base);
1130 
1131 	if (req->nbytes == 0) {
1132 		/* no real updates required */
1133 		return 0;
1134 	}
1135 
1136 	state->xcbc_count++;
1137 
1138 	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1139 					req->nbytes, block_size, flags);
1140 	if (rc) {
1141 		if (rc == 1) {
1142 			dev_dbg(dev, " data size not require HW update %x\n",
1143 				req->nbytes);
1144 			/* No hardware updates are required */
1145 			return 0;
1146 		}
1147 		dev_err(dev, "map_ahash_request_update() failed\n");
1148 		return -ENOMEM;
1149 	}
1150 
1151 	if (cc_map_req(dev, state, ctx)) {
1152 		dev_err(dev, "map_ahash_source() failed\n");
1153 		return -EINVAL;
1154 	}
1155 
1156 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1157 		cc_setup_xcbc(req, desc, &idx);
1158 	else
1159 		cc_setup_cmac(req, desc, &idx);
1160 
1161 	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1162 
1163 	/* store the hash digest result in context */
1164 	hw_desc_init(&desc[idx]);
1165 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1166 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1167 		      ctx->inter_digestsize, NS_BIT, 1);
1168 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1169 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1170 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1171 	idx++;
1172 
1173 	/* Setup request structure */
1174 	cc_req.user_cb = (void *)cc_update_complete;
1175 	cc_req.user_arg = (void *)req;
1176 
1177 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1178 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1179 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1180 		cc_unmap_hash_request(dev, state, req->src, true);
1181 		cc_unmap_req(dev, state, ctx);
1182 	}
1183 	return rc;
1184 }
1185 
1186 static int cc_mac_final(struct ahash_request *req)
1187 {
1188 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1189 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1190 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1191 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1192 	struct cc_crypto_req cc_req = {};
1193 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1194 	int idx = 0;
1195 	int rc = 0;
1196 	u32 key_size, key_len;
1197 	u32 digestsize = crypto_ahash_digestsize(tfm);
1198 	gfp_t flags = cc_gfp_flags(&req->base);
1199 	u32 rem_cnt = *cc_hash_buf_cnt(state);
1200 
1201 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1202 		key_size = CC_AES_128_BIT_KEY_SIZE;
1203 		key_len  = CC_AES_128_BIT_KEY_SIZE;
1204 	} else {
1205 		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1206 			ctx->key_params.keylen;
1207 		key_len =  ctx->key_params.keylen;
1208 	}
1209 
1210 	dev_dbg(dev, "===== final  xcbc reminder (%d) ====\n", rem_cnt);
1211 
1212 	if (cc_map_req(dev, state, ctx)) {
1213 		dev_err(dev, "map_ahash_source() failed\n");
1214 		return -EINVAL;
1215 	}
1216 
1217 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1218 				      req->nbytes, 0, flags)) {
1219 		dev_err(dev, "map_ahash_request_final() failed\n");
1220 		cc_unmap_req(dev, state, ctx);
1221 		return -ENOMEM;
1222 	}
1223 
1224 	if (cc_map_result(dev, state, digestsize)) {
1225 		dev_err(dev, "map_ahash_digest() failed\n");
1226 		cc_unmap_hash_request(dev, state, req->src, true);
1227 		cc_unmap_req(dev, state, ctx);
1228 		return -ENOMEM;
1229 	}
1230 
1231 	/* Setup request structure */
1232 	cc_req.user_cb = (void *)cc_hash_complete;
1233 	cc_req.user_arg = (void *)req;
1234 
1235 	if (state->xcbc_count && rem_cnt == 0) {
1236 		/* Load key for ECB decryption */
1237 		hw_desc_init(&desc[idx]);
1238 		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1239 		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1240 		set_din_type(&desc[idx], DMA_DLLI,
1241 			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1242 			     key_size, NS_BIT);
1243 		set_key_size_aes(&desc[idx], key_len);
1244 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1245 		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1246 		idx++;
1247 
1248 		/* Initiate decryption of block state to previous
1249 		 * block_state-XOR-M[n]
1250 		 */
1251 		hw_desc_init(&desc[idx]);
1252 		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1253 			     CC_AES_BLOCK_SIZE, NS_BIT);
1254 		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1255 			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1256 		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1257 		idx++;
1258 
1259 		/* Memory Barrier: wait for axi write to complete */
1260 		hw_desc_init(&desc[idx]);
1261 		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1262 		set_dout_no_dma(&desc[idx], 0, 0, 1);
1263 		idx++;
1264 	}
1265 
1266 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1267 		cc_setup_xcbc(req, desc, &idx);
1268 	else
1269 		cc_setup_cmac(req, desc, &idx);
1270 
1271 	if (state->xcbc_count == 0) {
1272 		hw_desc_init(&desc[idx]);
1273 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1274 		set_key_size_aes(&desc[idx], key_len);
1275 		set_cmac_size0_mode(&desc[idx]);
1276 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1277 		idx++;
1278 	} else if (rem_cnt > 0) {
1279 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1280 	} else {
1281 		hw_desc_init(&desc[idx]);
1282 		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1283 		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1284 		idx++;
1285 	}
1286 
1287 	/* Get final MAC result */
1288 	hw_desc_init(&desc[idx]);
1289 	/* TODO */
1290 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1291 		      digestsize, NS_BIT, 1);
1292 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1293 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1294 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1295 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1296 	idx++;
1297 
1298 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1299 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1300 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1301 		cc_unmap_hash_request(dev, state, req->src, true);
1302 		cc_unmap_result(dev, state, digestsize, req->result);
1303 		cc_unmap_req(dev, state, ctx);
1304 	}
1305 	return rc;
1306 }
1307 
1308 static int cc_mac_finup(struct ahash_request *req)
1309 {
1310 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1311 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1312 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1313 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1314 	struct cc_crypto_req cc_req = {};
1315 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1316 	int idx = 0;
1317 	int rc = 0;
1318 	u32 key_len = 0;
1319 	u32 digestsize = crypto_ahash_digestsize(tfm);
1320 	gfp_t flags = cc_gfp_flags(&req->base);
1321 
1322 	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1323 	if (state->xcbc_count > 0 && req->nbytes == 0) {
1324 		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
1325 		return cc_mac_final(req);
1326 	}
1327 
1328 	if (cc_map_req(dev, state, ctx)) {
1329 		dev_err(dev, "map_ahash_source() failed\n");
1330 		return -EINVAL;
1331 	}
1332 
1333 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1334 				      req->nbytes, 1, flags)) {
1335 		dev_err(dev, "map_ahash_request_final() failed\n");
1336 		cc_unmap_req(dev, state, ctx);
1337 		return -ENOMEM;
1338 	}
1339 	if (cc_map_result(dev, state, digestsize)) {
1340 		dev_err(dev, "map_ahash_digest() failed\n");
1341 		cc_unmap_hash_request(dev, state, req->src, true);
1342 		cc_unmap_req(dev, state, ctx);
1343 		return -ENOMEM;
1344 	}
1345 
1346 	/* Setup request structure */
1347 	cc_req.user_cb = (void *)cc_hash_complete;
1348 	cc_req.user_arg = (void *)req;
1349 
1350 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1351 		key_len = CC_AES_128_BIT_KEY_SIZE;
1352 		cc_setup_xcbc(req, desc, &idx);
1353 	} else {
1354 		key_len = ctx->key_params.keylen;
1355 		cc_setup_cmac(req, desc, &idx);
1356 	}
1357 
1358 	if (req->nbytes == 0) {
1359 		hw_desc_init(&desc[idx]);
1360 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1361 		set_key_size_aes(&desc[idx], key_len);
1362 		set_cmac_size0_mode(&desc[idx]);
1363 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1364 		idx++;
1365 	} else {
1366 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1367 	}
1368 
1369 	/* Get final MAC result */
1370 	hw_desc_init(&desc[idx]);
1371 	/* TODO */
1372 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1373 		      digestsize, NS_BIT, 1);
1374 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1375 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1376 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1377 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1378 	idx++;
1379 
1380 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1381 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1382 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1383 		cc_unmap_hash_request(dev, state, req->src, true);
1384 		cc_unmap_result(dev, state, digestsize, req->result);
1385 		cc_unmap_req(dev, state, ctx);
1386 	}
1387 	return rc;
1388 }
1389 
1390 static int cc_mac_digest(struct ahash_request *req)
1391 {
1392 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1393 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1394 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1395 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1396 	u32 digestsize = crypto_ahash_digestsize(tfm);
1397 	struct cc_crypto_req cc_req = {};
1398 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1399 	u32 key_len;
1400 	unsigned int idx = 0;
1401 	int rc;
1402 	gfp_t flags = cc_gfp_flags(&req->base);
1403 
1404 	dev_dbg(dev, "===== -digest mac (%d) ====\n",  req->nbytes);
1405 
1406 	cc_init_req(dev, state, ctx);
1407 
1408 	if (cc_map_req(dev, state, ctx)) {
1409 		dev_err(dev, "map_ahash_source() failed\n");
1410 		return -ENOMEM;
1411 	}
1412 	if (cc_map_result(dev, state, digestsize)) {
1413 		dev_err(dev, "map_ahash_digest() failed\n");
1414 		cc_unmap_req(dev, state, ctx);
1415 		return -ENOMEM;
1416 	}
1417 
1418 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1419 				      req->nbytes, 1, flags)) {
1420 		dev_err(dev, "map_ahash_request_final() failed\n");
1421 		cc_unmap_req(dev, state, ctx);
1422 		return -ENOMEM;
1423 	}
1424 
1425 	/* Setup request structure */
1426 	cc_req.user_cb = (void *)cc_digest_complete;
1427 	cc_req.user_arg = (void *)req;
1428 
1429 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1430 		key_len = CC_AES_128_BIT_KEY_SIZE;
1431 		cc_setup_xcbc(req, desc, &idx);
1432 	} else {
1433 		key_len = ctx->key_params.keylen;
1434 		cc_setup_cmac(req, desc, &idx);
1435 	}
1436 
1437 	if (req->nbytes == 0) {
1438 		hw_desc_init(&desc[idx]);
1439 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1440 		set_key_size_aes(&desc[idx], key_len);
1441 		set_cmac_size0_mode(&desc[idx]);
1442 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1443 		idx++;
1444 	} else {
1445 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1446 	}
1447 
1448 	/* Get final MAC result */
1449 	hw_desc_init(&desc[idx]);
1450 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1451 		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1452 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1453 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1454 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1455 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1456 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1457 	idx++;
1458 
1459 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1460 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1461 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1462 		cc_unmap_hash_request(dev, state, req->src, true);
1463 		cc_unmap_result(dev, state, digestsize, req->result);
1464 		cc_unmap_req(dev, state, ctx);
1465 	}
1466 	return rc;
1467 }
1468 
1469 static int cc_hash_export(struct ahash_request *req, void *out)
1470 {
1471 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1472 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1473 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1474 	u8 *curr_buff = cc_hash_buf(state);
1475 	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1476 	const u32 tmp = CC_EXPORT_MAGIC;
1477 
1478 	memcpy(out, &tmp, sizeof(u32));
1479 	out += sizeof(u32);
1480 
1481 	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1482 	out += ctx->inter_digestsize;
1483 
1484 	memcpy(out, state->digest_bytes_len, ctx->hash_len);
1485 	out += ctx->hash_len;
1486 
1487 	memcpy(out, &curr_buff_cnt, sizeof(u32));
1488 	out += sizeof(u32);
1489 
1490 	memcpy(out, curr_buff, curr_buff_cnt);
1491 
1492 	return 0;
1493 }
1494 
1495 static int cc_hash_import(struct ahash_request *req, const void *in)
1496 {
1497 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1498 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1499 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1500 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1501 	u32 tmp;
1502 
1503 	memcpy(&tmp, in, sizeof(u32));
1504 	if (tmp != CC_EXPORT_MAGIC)
1505 		return -EINVAL;
1506 	in += sizeof(u32);
1507 
1508 	cc_init_req(dev, state, ctx);
1509 
1510 	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1511 	in += ctx->inter_digestsize;
1512 
1513 	memcpy(state->digest_bytes_len, in, ctx->hash_len);
1514 	in += ctx->hash_len;
1515 
1516 	/* Sanity check the data as much as possible */
1517 	memcpy(&tmp, in, sizeof(u32));
1518 	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1519 		return -EINVAL;
1520 	in += sizeof(u32);
1521 
1522 	state->buf_cnt[0] = tmp;
1523 	memcpy(state->buffers[0], in, tmp);
1524 
1525 	return 0;
1526 }
1527 
1528 struct cc_hash_template {
1529 	char name[CRYPTO_MAX_ALG_NAME];
1530 	char driver_name[CRYPTO_MAX_ALG_NAME];
1531 	char mac_name[CRYPTO_MAX_ALG_NAME];
1532 	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1533 	unsigned int blocksize;
1534 	bool is_mac;
1535 	bool synchronize;
1536 	struct ahash_alg template_ahash;
1537 	int hash_mode;
1538 	int hw_mode;
1539 	int inter_digestsize;
1540 	struct cc_drvdata *drvdata;
1541 	u32 min_hw_rev;
1542 	enum cc_std_body std_body;
1543 };
1544 
1545 #define CC_STATE_SIZE(_x) \
1546 	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1547 
1548 /* hash descriptors */
1549 static struct cc_hash_template driver_hash[] = {
1550 	//Asynchronize hash template
1551 	{
1552 		.name = "sha1",
1553 		.driver_name = "sha1-ccree",
1554 		.mac_name = "hmac(sha1)",
1555 		.mac_driver_name = "hmac-sha1-ccree",
1556 		.blocksize = SHA1_BLOCK_SIZE,
1557 		.is_mac = true,
1558 		.synchronize = false,
1559 		.template_ahash = {
1560 			.init = cc_hash_init,
1561 			.update = cc_hash_update,
1562 			.final = cc_hash_final,
1563 			.finup = cc_hash_finup,
1564 			.digest = cc_hash_digest,
1565 			.export = cc_hash_export,
1566 			.import = cc_hash_import,
1567 			.setkey = cc_hash_setkey,
1568 			.halg = {
1569 				.digestsize = SHA1_DIGEST_SIZE,
1570 				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1571 			},
1572 		},
1573 		.hash_mode = DRV_HASH_SHA1,
1574 		.hw_mode = DRV_HASH_HW_SHA1,
1575 		.inter_digestsize = SHA1_DIGEST_SIZE,
1576 		.min_hw_rev = CC_HW_REV_630,
1577 		.std_body = CC_STD_NIST,
1578 	},
1579 	{
1580 		.name = "sha256",
1581 		.driver_name = "sha256-ccree",
1582 		.mac_name = "hmac(sha256)",
1583 		.mac_driver_name = "hmac-sha256-ccree",
1584 		.blocksize = SHA256_BLOCK_SIZE,
1585 		.is_mac = true,
1586 		.template_ahash = {
1587 			.init = cc_hash_init,
1588 			.update = cc_hash_update,
1589 			.final = cc_hash_final,
1590 			.finup = cc_hash_finup,
1591 			.digest = cc_hash_digest,
1592 			.export = cc_hash_export,
1593 			.import = cc_hash_import,
1594 			.setkey = cc_hash_setkey,
1595 			.halg = {
1596 				.digestsize = SHA256_DIGEST_SIZE,
1597 				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1598 			},
1599 		},
1600 		.hash_mode = DRV_HASH_SHA256,
1601 		.hw_mode = DRV_HASH_HW_SHA256,
1602 		.inter_digestsize = SHA256_DIGEST_SIZE,
1603 		.min_hw_rev = CC_HW_REV_630,
1604 		.std_body = CC_STD_NIST,
1605 	},
1606 	{
1607 		.name = "sha224",
1608 		.driver_name = "sha224-ccree",
1609 		.mac_name = "hmac(sha224)",
1610 		.mac_driver_name = "hmac-sha224-ccree",
1611 		.blocksize = SHA224_BLOCK_SIZE,
1612 		.is_mac = true,
1613 		.template_ahash = {
1614 			.init = cc_hash_init,
1615 			.update = cc_hash_update,
1616 			.final = cc_hash_final,
1617 			.finup = cc_hash_finup,
1618 			.digest = cc_hash_digest,
1619 			.export = cc_hash_export,
1620 			.import = cc_hash_import,
1621 			.setkey = cc_hash_setkey,
1622 			.halg = {
1623 				.digestsize = SHA224_DIGEST_SIZE,
1624 				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1625 			},
1626 		},
1627 		.hash_mode = DRV_HASH_SHA224,
1628 		.hw_mode = DRV_HASH_HW_SHA256,
1629 		.inter_digestsize = SHA256_DIGEST_SIZE,
1630 		.min_hw_rev = CC_HW_REV_630,
1631 		.std_body = CC_STD_NIST,
1632 	},
1633 	{
1634 		.name = "sha384",
1635 		.driver_name = "sha384-ccree",
1636 		.mac_name = "hmac(sha384)",
1637 		.mac_driver_name = "hmac-sha384-ccree",
1638 		.blocksize = SHA384_BLOCK_SIZE,
1639 		.is_mac = true,
1640 		.template_ahash = {
1641 			.init = cc_hash_init,
1642 			.update = cc_hash_update,
1643 			.final = cc_hash_final,
1644 			.finup = cc_hash_finup,
1645 			.digest = cc_hash_digest,
1646 			.export = cc_hash_export,
1647 			.import = cc_hash_import,
1648 			.setkey = cc_hash_setkey,
1649 			.halg = {
1650 				.digestsize = SHA384_DIGEST_SIZE,
1651 				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1652 			},
1653 		},
1654 		.hash_mode = DRV_HASH_SHA384,
1655 		.hw_mode = DRV_HASH_HW_SHA512,
1656 		.inter_digestsize = SHA512_DIGEST_SIZE,
1657 		.min_hw_rev = CC_HW_REV_712,
1658 		.std_body = CC_STD_NIST,
1659 	},
1660 	{
1661 		.name = "sha512",
1662 		.driver_name = "sha512-ccree",
1663 		.mac_name = "hmac(sha512)",
1664 		.mac_driver_name = "hmac-sha512-ccree",
1665 		.blocksize = SHA512_BLOCK_SIZE,
1666 		.is_mac = true,
1667 		.template_ahash = {
1668 			.init = cc_hash_init,
1669 			.update = cc_hash_update,
1670 			.final = cc_hash_final,
1671 			.finup = cc_hash_finup,
1672 			.digest = cc_hash_digest,
1673 			.export = cc_hash_export,
1674 			.import = cc_hash_import,
1675 			.setkey = cc_hash_setkey,
1676 			.halg = {
1677 				.digestsize = SHA512_DIGEST_SIZE,
1678 				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1679 			},
1680 		},
1681 		.hash_mode = DRV_HASH_SHA512,
1682 		.hw_mode = DRV_HASH_HW_SHA512,
1683 		.inter_digestsize = SHA512_DIGEST_SIZE,
1684 		.min_hw_rev = CC_HW_REV_712,
1685 		.std_body = CC_STD_NIST,
1686 	},
1687 	{
1688 		.name = "md5",
1689 		.driver_name = "md5-ccree",
1690 		.mac_name = "hmac(md5)",
1691 		.mac_driver_name = "hmac-md5-ccree",
1692 		.blocksize = MD5_HMAC_BLOCK_SIZE,
1693 		.is_mac = true,
1694 		.template_ahash = {
1695 			.init = cc_hash_init,
1696 			.update = cc_hash_update,
1697 			.final = cc_hash_final,
1698 			.finup = cc_hash_finup,
1699 			.digest = cc_hash_digest,
1700 			.export = cc_hash_export,
1701 			.import = cc_hash_import,
1702 			.setkey = cc_hash_setkey,
1703 			.halg = {
1704 				.digestsize = MD5_DIGEST_SIZE,
1705 				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1706 			},
1707 		},
1708 		.hash_mode = DRV_HASH_MD5,
1709 		.hw_mode = DRV_HASH_HW_MD5,
1710 		.inter_digestsize = MD5_DIGEST_SIZE,
1711 		.min_hw_rev = CC_HW_REV_630,
1712 		.std_body = CC_STD_NIST,
1713 	},
1714 	{
1715 		.name = "sm3",
1716 		.driver_name = "sm3-ccree",
1717 		.blocksize = SM3_BLOCK_SIZE,
1718 		.is_mac = false,
1719 		.template_ahash = {
1720 			.init = cc_hash_init,
1721 			.update = cc_hash_update,
1722 			.final = cc_hash_final,
1723 			.finup = cc_hash_finup,
1724 			.digest = cc_hash_digest,
1725 			.export = cc_hash_export,
1726 			.import = cc_hash_import,
1727 			.setkey = cc_hash_setkey,
1728 			.halg = {
1729 				.digestsize = SM3_DIGEST_SIZE,
1730 				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1731 			},
1732 		},
1733 		.hash_mode = DRV_HASH_SM3,
1734 		.hw_mode = DRV_HASH_HW_SM3,
1735 		.inter_digestsize = SM3_DIGEST_SIZE,
1736 		.min_hw_rev = CC_HW_REV_713,
1737 		.std_body = CC_STD_OSCCA,
1738 	},
1739 	{
1740 		.mac_name = "xcbc(aes)",
1741 		.mac_driver_name = "xcbc-aes-ccree",
1742 		.blocksize = AES_BLOCK_SIZE,
1743 		.is_mac = true,
1744 		.template_ahash = {
1745 			.init = cc_hash_init,
1746 			.update = cc_mac_update,
1747 			.final = cc_mac_final,
1748 			.finup = cc_mac_finup,
1749 			.digest = cc_mac_digest,
1750 			.setkey = cc_xcbc_setkey,
1751 			.export = cc_hash_export,
1752 			.import = cc_hash_import,
1753 			.halg = {
1754 				.digestsize = AES_BLOCK_SIZE,
1755 				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1756 			},
1757 		},
1758 		.hash_mode = DRV_HASH_NULL,
1759 		.hw_mode = DRV_CIPHER_XCBC_MAC,
1760 		.inter_digestsize = AES_BLOCK_SIZE,
1761 		.min_hw_rev = CC_HW_REV_630,
1762 		.std_body = CC_STD_NIST,
1763 	},
1764 	{
1765 		.mac_name = "cmac(aes)",
1766 		.mac_driver_name = "cmac-aes-ccree",
1767 		.blocksize = AES_BLOCK_SIZE,
1768 		.is_mac = true,
1769 		.template_ahash = {
1770 			.init = cc_hash_init,
1771 			.update = cc_mac_update,
1772 			.final = cc_mac_final,
1773 			.finup = cc_mac_finup,
1774 			.digest = cc_mac_digest,
1775 			.setkey = cc_cmac_setkey,
1776 			.export = cc_hash_export,
1777 			.import = cc_hash_import,
1778 			.halg = {
1779 				.digestsize = AES_BLOCK_SIZE,
1780 				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1781 			},
1782 		},
1783 		.hash_mode = DRV_HASH_NULL,
1784 		.hw_mode = DRV_CIPHER_CMAC,
1785 		.inter_digestsize = AES_BLOCK_SIZE,
1786 		.min_hw_rev = CC_HW_REV_630,
1787 		.std_body = CC_STD_NIST,
1788 	},
1789 };
1790 
1791 static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1792 					     struct device *dev, bool keyed)
1793 {
1794 	struct cc_hash_alg *t_crypto_alg;
1795 	struct crypto_alg *alg;
1796 	struct ahash_alg *halg;
1797 
1798 	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1799 	if (!t_crypto_alg)
1800 		return ERR_PTR(-ENOMEM);
1801 
1802 	t_crypto_alg->ahash_alg = template->template_ahash;
1803 	halg = &t_crypto_alg->ahash_alg;
1804 	alg = &halg->halg.base;
1805 
1806 	if (keyed) {
1807 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1808 			 template->mac_name);
1809 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1810 			 template->mac_driver_name);
1811 	} else {
1812 		halg->setkey = NULL;
1813 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1814 			 template->name);
1815 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1816 			 template->driver_name);
1817 	}
1818 	alg->cra_module = THIS_MODULE;
1819 	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1820 	alg->cra_priority = CC_CRA_PRIO;
1821 	alg->cra_blocksize = template->blocksize;
1822 	alg->cra_alignmask = 0;
1823 	alg->cra_exit = cc_cra_exit;
1824 
1825 	alg->cra_init = cc_cra_init;
1826 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1827 
1828 	t_crypto_alg->hash_mode = template->hash_mode;
1829 	t_crypto_alg->hw_mode = template->hw_mode;
1830 	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1831 
1832 	return t_crypto_alg;
1833 }
1834 
1835 int cc_init_hash_sram(struct cc_drvdata *drvdata)
1836 {
1837 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1838 	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1839 	unsigned int larval_seq_len = 0;
1840 	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1841 	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1842 	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1843 	int rc = 0;
1844 
1845 	/* Copy-to-sram digest-len */
1846 	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1847 			 ARRAY_SIZE(digest_len_init), larval_seq,
1848 			 &larval_seq_len);
1849 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1850 	if (rc)
1851 		goto init_digest_const_err;
1852 
1853 	sram_buff_ofs += sizeof(digest_len_init);
1854 	larval_seq_len = 0;
1855 
1856 	if (large_sha_supported) {
1857 		/* Copy-to-sram digest-len for sha384/512 */
1858 		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1859 				 ARRAY_SIZE(digest_len_sha512_init),
1860 				 larval_seq, &larval_seq_len);
1861 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1862 		if (rc)
1863 			goto init_digest_const_err;
1864 
1865 		sram_buff_ofs += sizeof(digest_len_sha512_init);
1866 		larval_seq_len = 0;
1867 	}
1868 
1869 	/* The initial digests offset */
1870 	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1871 
1872 	/* Copy-to-sram initial SHA* digests */
1873 	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1874 			 larval_seq, &larval_seq_len);
1875 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1876 	if (rc)
1877 		goto init_digest_const_err;
1878 	sram_buff_ofs += sizeof(md5_init);
1879 	larval_seq_len = 0;
1880 
1881 	cc_set_sram_desc(sha1_init, sram_buff_ofs,
1882 			 ARRAY_SIZE(sha1_init), larval_seq,
1883 			 &larval_seq_len);
1884 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1885 	if (rc)
1886 		goto init_digest_const_err;
1887 	sram_buff_ofs += sizeof(sha1_init);
1888 	larval_seq_len = 0;
1889 
1890 	cc_set_sram_desc(sha224_init, sram_buff_ofs,
1891 			 ARRAY_SIZE(sha224_init), larval_seq,
1892 			 &larval_seq_len);
1893 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1894 	if (rc)
1895 		goto init_digest_const_err;
1896 	sram_buff_ofs += sizeof(sha224_init);
1897 	larval_seq_len = 0;
1898 
1899 	cc_set_sram_desc(sha256_init, sram_buff_ofs,
1900 			 ARRAY_SIZE(sha256_init), larval_seq,
1901 			 &larval_seq_len);
1902 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1903 	if (rc)
1904 		goto init_digest_const_err;
1905 	sram_buff_ofs += sizeof(sha256_init);
1906 	larval_seq_len = 0;
1907 
1908 	if (sm3_supported) {
1909 		cc_set_sram_desc(sm3_init, sram_buff_ofs,
1910 				 ARRAY_SIZE(sm3_init), larval_seq,
1911 				 &larval_seq_len);
1912 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1913 		if (rc)
1914 			goto init_digest_const_err;
1915 		sram_buff_ofs += sizeof(sm3_init);
1916 		larval_seq_len = 0;
1917 	}
1918 
1919 	if (large_sha_supported) {
1920 		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1921 				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1922 				 &larval_seq_len);
1923 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1924 		if (rc)
1925 			goto init_digest_const_err;
1926 		sram_buff_ofs += sizeof(sha384_init);
1927 		larval_seq_len = 0;
1928 
1929 		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1930 				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1931 				 &larval_seq_len);
1932 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1933 		if (rc)
1934 			goto init_digest_const_err;
1935 	}
1936 
1937 init_digest_const_err:
1938 	return rc;
1939 }
1940 
1941 static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1942 {
1943 	int i;
1944 	u32 tmp;
1945 
1946 	for (i = 0; i < size; i += 2) {
1947 		tmp = buf[i];
1948 		buf[i] = buf[i + 1];
1949 		buf[i + 1] = tmp;
1950 	}
1951 }
1952 
1953 /*
1954  * Due to the way the HW works we need to swap every
1955  * double word in the SHA384 and SHA512 larval hashes
1956  */
1957 void __init cc_hash_global_init(void)
1958 {
1959 	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1960 	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1961 }
1962 
1963 int cc_hash_alloc(struct cc_drvdata *drvdata)
1964 {
1965 	struct cc_hash_handle *hash_handle;
1966 	cc_sram_addr_t sram_buff;
1967 	u32 sram_size_to_alloc;
1968 	struct device *dev = drvdata_to_dev(drvdata);
1969 	int rc = 0;
1970 	int alg;
1971 
1972 	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1973 	if (!hash_handle)
1974 		return -ENOMEM;
1975 
1976 	INIT_LIST_HEAD(&hash_handle->hash_list);
1977 	drvdata->hash_handle = hash_handle;
1978 
1979 	sram_size_to_alloc = sizeof(digest_len_init) +
1980 			sizeof(md5_init) +
1981 			sizeof(sha1_init) +
1982 			sizeof(sha224_init) +
1983 			sizeof(sha256_init);
1984 
1985 	if (drvdata->hw_rev >= CC_HW_REV_713)
1986 		sram_size_to_alloc += sizeof(sm3_init);
1987 
1988 	if (drvdata->hw_rev >= CC_HW_REV_712)
1989 		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1990 			sizeof(sha384_init) + sizeof(sha512_init);
1991 
1992 	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1993 	if (sram_buff == NULL_SRAM_ADDR) {
1994 		dev_err(dev, "SRAM pool exhausted\n");
1995 		rc = -ENOMEM;
1996 		goto fail;
1997 	}
1998 
1999 	/* The initial digest-len offset */
2000 	hash_handle->digest_len_sram_addr = sram_buff;
2001 
2002 	/*must be set before the alg registration as it is being used there*/
2003 	rc = cc_init_hash_sram(drvdata);
2004 	if (rc) {
2005 		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2006 		goto fail;
2007 	}
2008 
2009 	/* ahash registration */
2010 	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2011 		struct cc_hash_alg *t_alg;
2012 		int hw_mode = driver_hash[alg].hw_mode;
2013 
2014 		/* Check that the HW revision and variants are suitable */
2015 		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2016 		    !(drvdata->std_bodies & driver_hash[alg].std_body))
2017 			continue;
2018 
2019 		if (driver_hash[alg].is_mac) {
2020 			/* register hmac version */
2021 			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2022 			if (IS_ERR(t_alg)) {
2023 				rc = PTR_ERR(t_alg);
2024 				dev_err(dev, "%s alg allocation failed\n",
2025 					driver_hash[alg].driver_name);
2026 				goto fail;
2027 			}
2028 			t_alg->drvdata = drvdata;
2029 
2030 			rc = crypto_register_ahash(&t_alg->ahash_alg);
2031 			if (rc) {
2032 				dev_err(dev, "%s alg registration failed\n",
2033 					driver_hash[alg].driver_name);
2034 				kfree(t_alg);
2035 				goto fail;
2036 			} else {
2037 				list_add_tail(&t_alg->entry,
2038 					      &hash_handle->hash_list);
2039 			}
2040 		}
2041 		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2042 		    hw_mode == DRV_CIPHER_CMAC)
2043 			continue;
2044 
2045 		/* register hash version */
2046 		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2047 		if (IS_ERR(t_alg)) {
2048 			rc = PTR_ERR(t_alg);
2049 			dev_err(dev, "%s alg allocation failed\n",
2050 				driver_hash[alg].driver_name);
2051 			goto fail;
2052 		}
2053 		t_alg->drvdata = drvdata;
2054 
2055 		rc = crypto_register_ahash(&t_alg->ahash_alg);
2056 		if (rc) {
2057 			dev_err(dev, "%s alg registration failed\n",
2058 				driver_hash[alg].driver_name);
2059 			kfree(t_alg);
2060 			goto fail;
2061 		} else {
2062 			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2063 		}
2064 	}
2065 
2066 	return 0;
2067 
2068 fail:
2069 	kfree(drvdata->hash_handle);
2070 	drvdata->hash_handle = NULL;
2071 	return rc;
2072 }
2073 
2074 int cc_hash_free(struct cc_drvdata *drvdata)
2075 {
2076 	struct cc_hash_alg *t_hash_alg, *hash_n;
2077 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2078 
2079 	if (hash_handle) {
2080 		list_for_each_entry_safe(t_hash_alg, hash_n,
2081 					 &hash_handle->hash_list, entry) {
2082 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2083 			list_del(&t_hash_alg->entry);
2084 			kfree(t_hash_alg);
2085 		}
2086 
2087 		kfree(hash_handle);
2088 		drvdata->hash_handle = NULL;
2089 	}
2090 	return 0;
2091 }
2092 
2093 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2094 			  unsigned int *seq_size)
2095 {
2096 	unsigned int idx = *seq_size;
2097 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2098 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2099 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2100 
2101 	/* Setup XCBC MAC K1 */
2102 	hw_desc_init(&desc[idx]);
2103 	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2104 					    XCBC_MAC_K1_OFFSET),
2105 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2106 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2107 	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2108 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2109 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2110 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2111 	idx++;
2112 
2113 	/* Setup XCBC MAC K2 */
2114 	hw_desc_init(&desc[idx]);
2115 	set_din_type(&desc[idx], DMA_DLLI,
2116 		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2117 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2118 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2119 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2120 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2121 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2122 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2123 	idx++;
2124 
2125 	/* Setup XCBC MAC K3 */
2126 	hw_desc_init(&desc[idx]);
2127 	set_din_type(&desc[idx], DMA_DLLI,
2128 		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2129 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2130 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2131 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2132 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2133 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2134 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2135 	idx++;
2136 
2137 	/* Loading MAC state */
2138 	hw_desc_init(&desc[idx]);
2139 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2140 		     CC_AES_BLOCK_SIZE, NS_BIT);
2141 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2142 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2143 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2144 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2145 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2146 	idx++;
2147 	*seq_size = idx;
2148 }
2149 
2150 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2151 			  unsigned int *seq_size)
2152 {
2153 	unsigned int idx = *seq_size;
2154 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2155 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2156 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2157 
2158 	/* Setup CMAC Key */
2159 	hw_desc_init(&desc[idx]);
2160 	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2161 		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2162 		      ctx->key_params.keylen), NS_BIT);
2163 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2164 	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2165 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2166 	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2167 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2168 	idx++;
2169 
2170 	/* Load MAC state */
2171 	hw_desc_init(&desc[idx]);
2172 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2173 		     CC_AES_BLOCK_SIZE, NS_BIT);
2174 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2175 	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2176 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2177 	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2178 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2179 	idx++;
2180 	*seq_size = idx;
2181 }
2182 
2183 static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2184 			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2185 			struct cc_hw_desc desc[], bool is_not_last_data,
2186 			unsigned int *seq_size)
2187 {
2188 	unsigned int idx = *seq_size;
2189 	struct device *dev = drvdata_to_dev(ctx->drvdata);
2190 
2191 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2192 		hw_desc_init(&desc[idx]);
2193 		set_din_type(&desc[idx], DMA_DLLI,
2194 			     sg_dma_address(areq_ctx->curr_sg),
2195 			     areq_ctx->curr_sg->length, NS_BIT);
2196 		set_flow_mode(&desc[idx], flow_mode);
2197 		idx++;
2198 	} else {
2199 		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2200 			dev_dbg(dev, " NULL mode\n");
2201 			/* nothing to build */
2202 			return;
2203 		}
2204 		/* bypass */
2205 		hw_desc_init(&desc[idx]);
2206 		set_din_type(&desc[idx], DMA_DLLI,
2207 			     areq_ctx->mlli_params.mlli_dma_addr,
2208 			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2209 		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2210 			      areq_ctx->mlli_params.mlli_len);
2211 		set_flow_mode(&desc[idx], BYPASS);
2212 		idx++;
2213 		/* process */
2214 		hw_desc_init(&desc[idx]);
2215 		set_din_type(&desc[idx], DMA_MLLI,
2216 			     ctx->drvdata->mlli_sram_addr,
2217 			     areq_ctx->mlli_nents, NS_BIT);
2218 		set_flow_mode(&desc[idx], flow_mode);
2219 		idx++;
2220 	}
2221 	if (is_not_last_data)
2222 		set_din_not_last_indication(&desc[(idx - 1)]);
2223 	/* return updated desc sequence size */
2224 	*seq_size = idx;
2225 }
2226 
2227 static const void *cc_larval_digest(struct device *dev, u32 mode)
2228 {
2229 	switch (mode) {
2230 	case DRV_HASH_MD5:
2231 		return md5_init;
2232 	case DRV_HASH_SHA1:
2233 		return sha1_init;
2234 	case DRV_HASH_SHA224:
2235 		return sha224_init;
2236 	case DRV_HASH_SHA256:
2237 		return sha256_init;
2238 	case DRV_HASH_SHA384:
2239 		return sha384_init;
2240 	case DRV_HASH_SHA512:
2241 		return sha512_init;
2242 	case DRV_HASH_SM3:
2243 		return sm3_init;
2244 	default:
2245 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2246 		return md5_init;
2247 	}
2248 }
2249 
2250 /*!
2251  * Gets the address of the initial digest in SRAM
2252  * according to the given hash mode
2253  *
2254  * \param drvdata
2255  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
2256  *
2257  * \return u32 The address of the initial digest in SRAM
2258  */
2259 cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2260 {
2261 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2262 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2263 	struct device *dev = drvdata_to_dev(_drvdata);
2264 	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2265 	cc_sram_addr_t addr;
2266 
2267 	switch (mode) {
2268 	case DRV_HASH_NULL:
2269 		break; /*Ignore*/
2270 	case DRV_HASH_MD5:
2271 		return (hash_handle->larval_digest_sram_addr);
2272 	case DRV_HASH_SHA1:
2273 		return (hash_handle->larval_digest_sram_addr +
2274 			sizeof(md5_init));
2275 	case DRV_HASH_SHA224:
2276 		return (hash_handle->larval_digest_sram_addr +
2277 			sizeof(md5_init) +
2278 			sizeof(sha1_init));
2279 	case DRV_HASH_SHA256:
2280 		return (hash_handle->larval_digest_sram_addr +
2281 			sizeof(md5_init) +
2282 			sizeof(sha1_init) +
2283 			sizeof(sha224_init));
2284 	case DRV_HASH_SM3:
2285 		return (hash_handle->larval_digest_sram_addr +
2286 			sizeof(md5_init) +
2287 			sizeof(sha1_init) +
2288 			sizeof(sha224_init) +
2289 			sizeof(sha256_init));
2290 	case DRV_HASH_SHA384:
2291 		addr = (hash_handle->larval_digest_sram_addr +
2292 			sizeof(md5_init) +
2293 			sizeof(sha1_init) +
2294 			sizeof(sha224_init) +
2295 			sizeof(sha256_init));
2296 		if (sm3_supported)
2297 			addr += sizeof(sm3_init);
2298 		return addr;
2299 	case DRV_HASH_SHA512:
2300 		addr = (hash_handle->larval_digest_sram_addr +
2301 			sizeof(md5_init) +
2302 			sizeof(sha1_init) +
2303 			sizeof(sha224_init) +
2304 			sizeof(sha256_init) +
2305 			sizeof(sha384_init));
2306 		if (sm3_supported)
2307 			addr += sizeof(sm3_init);
2308 		return addr;
2309 	default:
2310 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2311 	}
2312 
2313 	/*This is valid wrong value to avoid kernel crash*/
2314 	return hash_handle->larval_digest_sram_addr;
2315 }
2316 
2317 cc_sram_addr_t
2318 cc_digest_len_addr(void *drvdata, u32 mode)
2319 {
2320 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2321 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2322 	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2323 
2324 	switch (mode) {
2325 	case DRV_HASH_SHA1:
2326 	case DRV_HASH_SHA224:
2327 	case DRV_HASH_SHA256:
2328 	case DRV_HASH_MD5:
2329 		return digest_len_addr;
2330 #if (CC_DEV_SHA_MAX > 256)
2331 	case DRV_HASH_SHA384:
2332 	case DRV_HASH_SHA512:
2333 		return  digest_len_addr + sizeof(digest_len_init);
2334 #endif
2335 	default:
2336 		return digest_len_addr; /*to avoid kernel crash*/
2337 	}
2338 }
2339