xref: /openbmc/linux/drivers/crypto/ccree/cc_hash.c (revision 965f22bc)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <crypto/algapi.h>
7 #include <crypto/hash.h>
8 #include <crypto/md5.h>
9 #include <crypto/internal/hash.h>
10 
11 #include "cc_driver.h"
12 #include "cc_request_mgr.h"
13 #include "cc_buffer_mgr.h"
14 #include "cc_hash.h"
15 #include "cc_sram_mgr.h"
16 
17 #define CC_MAX_HASH_SEQ_LEN 12
18 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
19 
20 struct cc_hash_handle {
21 	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
22 	cc_sram_addr_t larval_digest_sram_addr;   /* const value in SRAM */
23 	struct list_head hash_list;
24 };
25 
26 static const u32 digest_len_init[] = {
27 	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
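/*
 * Note: MD5's initial state words are identical to SHA1_H0..SHA1_H3, so the
 * SHA-1 constants are reused for md5_init below. As with the other larval
 * digests here, the words are listed in reverse order (highest index first).
 */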
28 static const u32 md5_init[] = {
29 	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
30 static const u32 sha1_init[] = {
31 	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32 static const u32 sha224_init[] = {
33 	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
34 	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
35 static const u32 sha256_init[] = {
36 	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
37 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
38 static const u32 digest_len_sha512_init[] = {
39 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
40 static u64 sha384_init[] = {
41 	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
42 	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
43 static u64 sha512_init[] = {
44 	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
45 	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
46 
47 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
48 			  unsigned int *seq_size);
49 
50 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
51 			  unsigned int *seq_size);
52 
53 static const void *cc_larval_digest(struct device *dev, u32 mode);
54 
55 struct cc_hash_alg {
56 	struct list_head entry;
57 	int hash_mode;
58 	int hw_mode;
59 	int inter_digestsize;
60 	struct cc_drvdata *drvdata;
61 	struct ahash_alg ahash_alg;
62 };
63 
64 struct hash_key_req_ctx {
65 	u32 keylen;
66 	dma_addr_t key_dma_addr;
67 };
68 
69 /* hash per-session context */
70 struct cc_hash_ctx {
71 	struct cc_drvdata *drvdata;
72 	/* holds the original digest; the digest after "setkey" if HMAC,
73 	 * the initial digest if HASH.
74 	 */
75 	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
76 	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
77 
78 	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
79 	dma_addr_t digest_buff_dma_addr;
80 	/* used for HMAC with a key larger than the mode's block size */
81 	struct hash_key_req_ctx key_params;
82 	int hash_mode;
83 	int hw_mode;
84 	int inter_digestsize;
85 	struct completion setkey_comp;
86 	bool is_hmac;
87 };
88 
89 static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
90 			unsigned int flow_mode, struct cc_hw_desc desc[],
91 			bool is_not_last_data, unsigned int *seq_size);
92 
93 static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
94 {
95 	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
96 	    mode == DRV_HASH_SHA512) {
97 		set_bytes_swap(desc, 1);
98 	} else {
99 		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
100 	}
101 }
102 
103 static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
104 			 unsigned int digestsize)
105 {
106 	state->digest_result_dma_addr =
107 		dma_map_single(dev, state->digest_result_buff,
108 			       digestsize, DMA_BIDIRECTIONAL);
109 	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
110 		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
111 			digestsize);
112 		return -ENOMEM;
113 	}
114 	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
115 		digestsize, state->digest_result_buff,
116 		&state->digest_result_dma_addr);
117 
118 	return 0;
119 }
120 
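/*
 * Seed the per-request state from the per-tfm context: for HMAC (other than
 * XCBC/CMAC) copy in the precomputed IPAD digest and the initial byte-count
 * value, and stage the OPAD digest for the final pass; for a plain hash copy
 * in the larval digest of the selected mode.
 */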
121 static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
122 			struct cc_hash_ctx *ctx)
123 {
124 	bool is_hmac = ctx->is_hmac;
125 
126 	memset(state, 0, sizeof(*state));
127 
128 	if (is_hmac) {
129 		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
130 		    ctx->hw_mode != DRV_CIPHER_CMAC) {
131 			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
132 						ctx->inter_digestsize,
133 						DMA_BIDIRECTIONAL);
134 
135 			memcpy(state->digest_buff, ctx->digest_buff,
136 			       ctx->inter_digestsize);
137 			if (ctx->hash_mode == DRV_HASH_SHA512 ||
138 			    ctx->hash_mode == DRV_HASH_SHA384)
139 				memcpy(state->digest_bytes_len,
140 				       digest_len_sha512_init,
141 				       ctx->drvdata->hash_len_sz);
142 			else
143 				memcpy(state->digest_bytes_len, digest_len_init,
144 				       ctx->drvdata->hash_len_sz);
145 		}
146 
147 		if (ctx->hash_mode != DRV_HASH_NULL) {
148 			dma_sync_single_for_cpu(dev,
149 						ctx->opad_tmp_keys_dma_addr,
150 						ctx->inter_digestsize,
151 						DMA_BIDIRECTIONAL);
152 			memcpy(state->opad_digest_buff,
153 			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
154 		}
155 	} else { /*hash*/
156 		/* Copy the initial digests if hash flow. */
157 		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
158 
159 		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
160 	}
161 }
162 
163 static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
164 		      struct cc_hash_ctx *ctx)
165 {
166 	bool is_hmac = ctx->is_hmac;
167 
168 	state->digest_buff_dma_addr =
169 		dma_map_single(dev, state->digest_buff,
170 			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
171 	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
172 		dev_err(dev, "Mapping digest %d B at va=%pK for DMA failed\n",
173 			ctx->inter_digestsize, state->digest_buff);
174 		return -EINVAL;
175 	}
176 	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
177 		ctx->inter_digestsize, state->digest_buff,
178 		&state->digest_buff_dma_addr);
179 
180 	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
181 		state->digest_bytes_len_dma_addr =
182 			dma_map_single(dev, state->digest_bytes_len,
183 				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
184 		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
185 			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
186 				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
187 			goto unmap_digest_buf;
188 		}
189 		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
190 			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
191 			&state->digest_bytes_len_dma_addr);
192 	}
193 
194 	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
195 		state->opad_digest_dma_addr =
196 			dma_map_single(dev, state->opad_digest_buff,
197 				       ctx->inter_digestsize,
198 				       DMA_BIDIRECTIONAL);
199 		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
200 			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
201 				ctx->inter_digestsize,
202 				state->opad_digest_buff);
203 			goto unmap_digest_len;
204 		}
205 		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
206 			ctx->inter_digestsize, state->opad_digest_buff,
207 			&state->opad_digest_dma_addr);
208 	}
209 
210 	return 0;
211 
212 unmap_digest_len:
213 	if (state->digest_bytes_len_dma_addr) {
214 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
215 				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
216 		state->digest_bytes_len_dma_addr = 0;
217 	}
218 unmap_digest_buf:
219 	if (state->digest_buff_dma_addr) {
220 		dma_unmap_single(dev, state->digest_buff_dma_addr,
221 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
222 		state->digest_buff_dma_addr = 0;
223 	}
224 
225 	return -EINVAL;
226 }
227 
228 static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
229 			 struct cc_hash_ctx *ctx)
230 {
231 	if (state->digest_buff_dma_addr) {
232 		dma_unmap_single(dev, state->digest_buff_dma_addr,
233 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
234 		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
235 			&state->digest_buff_dma_addr);
236 		state->digest_buff_dma_addr = 0;
237 	}
238 	if (state->digest_bytes_len_dma_addr) {
239 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
240 				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
241 		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
242 			&state->digest_bytes_len_dma_addr);
243 		state->digest_bytes_len_dma_addr = 0;
244 	}
245 	if (state->opad_digest_dma_addr) {
246 		dma_unmap_single(dev, state->opad_digest_dma_addr,
247 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
248 		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
249 			&state->opad_digest_dma_addr);
250 		state->opad_digest_dma_addr = 0;
251 	}
252 }
253 
254 static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
255 			    unsigned int digestsize, u8 *result)
256 {
257 	if (state->digest_result_dma_addr) {
258 		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
259 				 DMA_BIDIRECTIONAL);
260 		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
261 			state->digest_result_buff,
262 			&state->digest_result_dma_addr, digestsize);
263 		memcpy(result, state->digest_result_buff, digestsize);
264 	}
265 	state->digest_result_dma_addr = 0;
266 }
267 
268 static void cc_update_complete(struct device *dev, void *cc_req, int err)
269 {
270 	struct ahash_request *req = (struct ahash_request *)cc_req;
271 	struct ahash_req_ctx *state = ahash_request_ctx(req);
272 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
273 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
274 
275 	dev_dbg(dev, "req=%pK\n", req);
276 
277 	cc_unmap_hash_request(dev, state, req->src, false);
278 	cc_unmap_req(dev, state, ctx);
279 	req->base.complete(&req->base, err);
280 }
281 
282 static void cc_digest_complete(struct device *dev, void *cc_req, int err)
283 {
284 	struct ahash_request *req = (struct ahash_request *)cc_req;
285 	struct ahash_req_ctx *state = ahash_request_ctx(req);
286 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
287 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
288 	u32 digestsize = crypto_ahash_digestsize(tfm);
289 
290 	dev_dbg(dev, "req=%pK\n", req);
291 
292 	cc_unmap_hash_request(dev, state, req->src, false);
293 	cc_unmap_result(dev, state, digestsize, req->result);
294 	cc_unmap_req(dev, state, ctx);
295 	req->base.complete(&req->base, err);
296 }
297 
298 static void cc_hash_complete(struct device *dev, void *cc_req, int err)
299 {
300 	struct ahash_request *req = (struct ahash_request *)cc_req;
301 	struct ahash_req_ctx *state = ahash_request_ctx(req);
302 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
303 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
304 	u32 digestsize = crypto_ahash_digestsize(tfm);
305 
306 	dev_dbg(dev, "req=%pK\n", req);
307 
308 	cc_unmap_hash_request(dev, state, req->src, false);
309 	cc_unmap_result(dev, state, digestsize, req->result);
310 	cc_unmap_req(dev, state, ctx);
311 	req->base.complete(&req->base, err);
312 }
313 
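/*
 * Append the descriptor that writes the final digest from the hash engine to
 * the request's result buffer, marks it as the last descriptor of the queue
 * and applies the mode-specific endianness fix-up.
 */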
314 static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
315 			 int idx)
316 {
317 	struct ahash_req_ctx *state = ahash_request_ctx(req);
318 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320 	u32 digestsize = crypto_ahash_digestsize(tfm);
321 
322 	/* Get final MAC result */
323 	hw_desc_init(&desc[idx]);
324 	set_cipher_mode(&desc[idx], ctx->hw_mode);
325 	/* TODO */
326 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
327 		      NS_BIT, 1);
328 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
329 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
330 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
331 	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
332 	cc_set_endianity(ctx->hash_mode, &desc[idx]);
333 	idx++;
334 
335 	return idx;
336 }
337 
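/*
 * Append the HMAC outer pass: write the inner digest back to the context
 * buffer, reload the engine with the OPAD state and the stored digest length
 * from SRAM, then hash the inner digest. A rough sketch of what the
 * descriptors compute:
 *
 *	inner  = H((K ^ ipad) || message)	(already in digest_buff)
 *	result = H((K ^ opad) || inner)
 */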
338 static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
339 		       int idx)
340 {
341 	struct ahash_req_ctx *state = ahash_request_ctx(req);
342 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
343 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
344 	u32 digestsize = crypto_ahash_digestsize(tfm);
345 
346 	/* store the hash digest result in the context */
347 	hw_desc_init(&desc[idx]);
348 	set_cipher_mode(&desc[idx], ctx->hw_mode);
349 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
350 		      NS_BIT, 0);
351 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
352 	cc_set_endianity(ctx->hash_mode, &desc[idx]);
353 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
354 	idx++;
355 
356 	/* Loading hash opad xor key state */
357 	hw_desc_init(&desc[idx]);
358 	set_cipher_mode(&desc[idx], ctx->hw_mode);
359 	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
360 		     ctx->inter_digestsize, NS_BIT);
361 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
362 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
363 	idx++;
364 
365 	/* Load the hash current length */
366 	hw_desc_init(&desc[idx]);
367 	set_cipher_mode(&desc[idx], ctx->hw_mode);
368 	set_din_sram(&desc[idx],
369 		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
370 		     ctx->drvdata->hash_len_sz);
371 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
372 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
373 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
374 	idx++;
375 
376 	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
377 	hw_desc_init(&desc[idx]);
378 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
379 	set_dout_no_dma(&desc[idx], 0, 0, 1);
380 	idx++;
381 
382 	/* Perform HASH update */
383 	hw_desc_init(&desc[idx]);
384 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
385 		     digestsize, NS_BIT);
386 	set_flow_mode(&desc[idx], DIN_HASH);
387 	idx++;
388 
389 	return idx;
390 }
391 
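/*
 * One-shot digest (hash or HMAC) entry point: map the request state, result
 * buffer and source data, then build a single descriptor chain that loads
 * the initial/IPAD digest and length, hashes the data, optionally runs the
 * HMAC outer pass and writes the result out. Illustrative caller path via
 * the generic ahash API (not part of this driver; actual dispatch depends on
 * which registered implementation the crypto API picks):
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);		-> cc_hash_setkey()
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_crypt(req, sgl, result, nbytes);
 *	crypto_ahash_digest(req);			-> cc_hash_digest()
 */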
392 static int cc_hash_digest(struct ahash_request *req)
393 {
394 	struct ahash_req_ctx *state = ahash_request_ctx(req);
395 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
396 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
397 	u32 digestsize = crypto_ahash_digestsize(tfm);
398 	struct scatterlist *src = req->src;
399 	unsigned int nbytes = req->nbytes;
400 	u8 *result = req->result;
401 	struct device *dev = drvdata_to_dev(ctx->drvdata);
402 	bool is_hmac = ctx->is_hmac;
403 	struct cc_crypto_req cc_req = {};
404 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
405 	cc_sram_addr_t larval_digest_addr =
406 		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
407 	int idx = 0;
408 	int rc = 0;
409 	gfp_t flags = cc_gfp_flags(&req->base);
410 
411 	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
412 		nbytes);
413 
414 	cc_init_req(dev, state, ctx);
415 
416 	if (cc_map_req(dev, state, ctx)) {
417 		dev_err(dev, "map_ahash_source() failed\n");
418 		return -ENOMEM;
419 	}
420 
421 	if (cc_map_result(dev, state, digestsize)) {
422 		dev_err(dev, "map_ahash_digest() failed\n");
423 		cc_unmap_req(dev, state, ctx);
424 		return -ENOMEM;
425 	}
426 
427 	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
428 				      flags)) {
429 		dev_err(dev, "map_ahash_request_final() failed\n");
430 		cc_unmap_result(dev, state, digestsize, result);
431 		cc_unmap_req(dev, state, ctx);
432 		return -ENOMEM;
433 	}
434 
435 	/* Setup request structure */
436 	cc_req.user_cb = cc_digest_complete;
437 	cc_req.user_arg = req;
438 
439 	/* If HMAC then load hash IPAD xor key, if HASH then load initial
440 	 * digest
441 	 */
442 	hw_desc_init(&desc[idx]);
443 	set_cipher_mode(&desc[idx], ctx->hw_mode);
444 	if (is_hmac) {
445 		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
446 			     ctx->inter_digestsize, NS_BIT);
447 	} else {
448 		set_din_sram(&desc[idx], larval_digest_addr,
449 			     ctx->inter_digestsize);
450 	}
451 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
452 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
453 	idx++;
454 
455 	/* Load the hash current length */
456 	hw_desc_init(&desc[idx]);
457 	set_cipher_mode(&desc[idx], ctx->hw_mode);
458 
459 	if (is_hmac) {
460 		set_din_type(&desc[idx], DMA_DLLI,
461 			     state->digest_bytes_len_dma_addr,
462 			     ctx->drvdata->hash_len_sz, NS_BIT);
463 	} else {
464 		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
465 		if (nbytes)
466 			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
467 		else
468 			set_cipher_do(&desc[idx], DO_PAD);
469 	}
470 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
471 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
472 	idx++;
473 
474 	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
475 
476 	if (is_hmac) {
477 		/* HW last hash block padding (aka. "DO_PAD") */
478 		hw_desc_init(&desc[idx]);
479 		set_cipher_mode(&desc[idx], ctx->hw_mode);
480 		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
481 			      ctx->drvdata->hash_len_sz, NS_BIT, 0);
482 		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
483 		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
484 		set_cipher_do(&desc[idx], DO_PAD);
485 		idx++;
486 
487 		idx = cc_fin_hmac(desc, req, idx);
488 	}
489 
490 	idx = cc_fin_result(desc, req, idx);
491 
492 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
493 	if (rc != -EINPROGRESS && rc != -EBUSY) {
494 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
495 		cc_unmap_hash_request(dev, state, src, true);
496 		cc_unmap_result(dev, state, digestsize, result);
497 		cc_unmap_req(dev, state, ctx);
498 	}
499 	return rc;
500 }
501 
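/*
 * Reload the hash engine with the intermediate digest and running byte count
 * saved in the request state, then queue the descriptors that hash the newly
 * mapped data. Shared by the update and final/finup paths.
 */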
502 static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
503 			   struct ahash_req_ctx *state, unsigned int idx)
504 {
505 	/* Restore hash digest */
506 	hw_desc_init(&desc[idx]);
507 	set_cipher_mode(&desc[idx], ctx->hw_mode);
508 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
509 		     ctx->inter_digestsize, NS_BIT);
510 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
511 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
512 	idx++;
513 
514 	/* Restore hash current length */
515 	hw_desc_init(&desc[idx]);
516 	set_cipher_mode(&desc[idx], ctx->hw_mode);
517 	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
518 	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
519 		     ctx->drvdata->hash_len_sz, NS_BIT);
520 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
521 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
522 	idx++;
523 
524 	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
525 
526 	return idx;
527 }
528 
529 static int cc_hash_update(struct ahash_request *req)
530 {
531 	struct ahash_req_ctx *state = ahash_request_ctx(req);
532 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
533 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
534 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
535 	struct scatterlist *src = req->src;
536 	unsigned int nbytes = req->nbytes;
537 	struct device *dev = drvdata_to_dev(ctx->drvdata);
538 	struct cc_crypto_req cc_req = {};
539 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
540 	u32 idx = 0;
541 	int rc;
542 	gfp_t flags = cc_gfp_flags(&req->base);
543 
544 	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
545 		"hmac" : "hash", nbytes);
546 
547 	if (nbytes == 0) {
548 		/* no real updates required */
549 		return 0;
550 	}
551 
552 	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
553 					block_size, flags);
554 	if (rc) {
555 		if (rc == 1) {
556 			dev_dbg(dev, "data size does not require HW update %x\n",
557 				nbytes);
558 			/* No hardware updates are required */
559 			return 0;
560 		}
561 		dev_err(dev, "map_ahash_request_update() failed\n");
562 		return -ENOMEM;
563 	}
564 
565 	if (cc_map_req(dev, state, ctx)) {
566 		dev_err(dev, "map_ahash_source() failed\n");
567 		cc_unmap_hash_request(dev, state, src, true);
568 		return -EINVAL;
569 	}
570 
571 	/* Setup request structure */
572 	cc_req.user_cb = cc_update_complete;
573 	cc_req.user_arg = req;
574 
575 	idx = cc_restore_hash(desc, ctx, state, idx);
576 
577 	/* store the hash digest result in context */
578 	hw_desc_init(&desc[idx]);
579 	set_cipher_mode(&desc[idx], ctx->hw_mode);
580 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
581 		      ctx->inter_digestsize, NS_BIT, 0);
582 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
583 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
584 	idx++;
585 
586 	/* store current hash length in context */
587 	hw_desc_init(&desc[idx]);
588 	set_cipher_mode(&desc[idx], ctx->hw_mode);
589 	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
590 		      ctx->drvdata->hash_len_sz, NS_BIT, 1);
591 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
592 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
593 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
594 	idx++;
595 
596 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
597 	if (rc != -EINPROGRESS && rc != -EBUSY) {
598 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
599 		cc_unmap_hash_request(dev, state, src, true);
600 		cc_unmap_req(dev, state, ctx);
601 	}
602 	return rc;
603 }
604 
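/*
 * Common final/finup implementation: 'update' selects whether the remaining
 * source data is mapped in as well (finup) or only the internally buffered
 * tail is hashed (final). The flow restores the hash state, requests
 * hardware padding of the last block, optionally runs the HMAC outer pass
 * and writes out the digest.
 */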
605 static int cc_do_finup(struct ahash_request *req, bool update)
606 {
607 	struct ahash_req_ctx *state = ahash_request_ctx(req);
608 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
609 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
610 	u32 digestsize = crypto_ahash_digestsize(tfm);
611 	struct scatterlist *src = req->src;
612 	unsigned int nbytes = req->nbytes;
613 	u8 *result = req->result;
614 	struct device *dev = drvdata_to_dev(ctx->drvdata);
615 	bool is_hmac = ctx->is_hmac;
616 	struct cc_crypto_req cc_req = {};
617 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
618 	unsigned int idx = 0;
619 	int rc;
620 	gfp_t flags = cc_gfp_flags(&req->base);
621 
622 	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
623 		update ? "finup" : "final", nbytes);
624 
625 	if (cc_map_req(dev, state, ctx)) {
626 		dev_err(dev, "map_ahash_source() failed\n");
627 		return -EINVAL;
628 	}
629 
630 	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
631 				      flags)) {
632 		dev_err(dev, "map_ahash_request_final() failed\n");
633 		cc_unmap_req(dev, state, ctx);
634 		return -ENOMEM;
635 	}
636 	if (cc_map_result(dev, state, digestsize)) {
637 		dev_err(dev, "map_ahash_digest() failed\n");
638 		cc_unmap_hash_request(dev, state, src, true);
639 		cc_unmap_req(dev, state, ctx);
640 		return -ENOMEM;
641 	}
642 
643 	/* Setup request structure */
644 	cc_req.user_cb = cc_hash_complete;
645 	cc_req.user_arg = req;
646 
647 	idx = cc_restore_hash(desc, ctx, state, idx);
648 
649 	/* Pad the hash */
650 	hw_desc_init(&desc[idx]);
651 	set_cipher_do(&desc[idx], DO_PAD);
652 	set_cipher_mode(&desc[idx], ctx->hw_mode);
653 	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
654 		      ctx->drvdata->hash_len_sz, NS_BIT, 0);
655 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
656 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
657 	idx++;
658 
659 	if (is_hmac)
660 		idx = cc_fin_hmac(desc, req, idx);
661 
662 	idx = cc_fin_result(desc, req, idx);
663 
664 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
665 	if (rc != -EINPROGRESS && rc != -EBUSY) {
666 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
667 		cc_unmap_hash_request(dev, state, src, true);
668 		cc_unmap_result(dev, state, digestsize, result);
669 		cc_unmap_req(dev, state, ctx);
670 	}
671 	return rc;
672 }
673 
674 static int cc_hash_finup(struct ahash_request *req)
675 {
676 	return cc_do_finup(req, true);
677 }
678 
679 
680 static int cc_hash_final(struct ahash_request *req)
681 {
682 	return cc_do_finup(req, false);
683 }
684 
685 static int cc_hash_init(struct ahash_request *req)
686 {
687 	struct ahash_req_ctx *state = ahash_request_ctx(req);
688 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
689 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
690 	struct device *dev = drvdata_to_dev(ctx->drvdata);
691 
692 	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
693 
694 	cc_init_req(dev, state, ctx);
695 
696 	return 0;
697 }
698 
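/*
 * HMAC setkey: a key longer than the block size is first hashed down to the
 * digest size and zero-padded back to a full block; shorter keys are copied
 * and zero-padded; a zero-length key yields an all-zero block. A second
 * synchronous request then derives the IPAD/OPAD states roughly as in
 * RFC 2104:
 *
 *	ipad_state = H-compress(K' ^ 0x36..36)	(HMAC_IPAD_CONST)
 *	opad_state = H-compress(K' ^ 0x5c..5c)	(HMAC_OPAD_CONST)
 *
 * with ipad_state kept in digest_buff and opad_state in opad_tmp_keys_buff.
 */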
699 static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
700 			  unsigned int keylen)
701 {
702 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
703 	struct cc_crypto_req cc_req = {};
704 	struct cc_hash_ctx *ctx = NULL;
705 	int blocksize = 0;
706 	int digestsize = 0;
707 	int i, idx = 0, rc = 0;
708 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
709 	cc_sram_addr_t larval_addr;
710 	struct device *dev;
711 
712 	ctx = crypto_ahash_ctx(ahash);
713 	dev = drvdata_to_dev(ctx->drvdata);
714 	dev_dbg(dev, "start keylen: %d\n", keylen);
715 
716 	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
717 	digestsize = crypto_ahash_digestsize(ahash);
718 
719 	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
720 
721 	/* The keylen value distinguishes the flows: a ZERO keylen means plain
722 	 * HASH, while any NON-ZERO keylen selects the HMAC flow.
723 	 */
724 	ctx->key_params.keylen = keylen;
725 	ctx->key_params.key_dma_addr = 0;
726 	ctx->is_hmac = true;
727 
728 	if (keylen) {
729 		ctx->key_params.key_dma_addr =
730 			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
731 		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
732 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
733 				key, keylen);
734 			return -ENOMEM;
735 		}
736 		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
737 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
738 
739 		if (keylen > blocksize) {
740 			/* Load hash initial state */
741 			hw_desc_init(&desc[idx]);
742 			set_cipher_mode(&desc[idx], ctx->hw_mode);
743 			set_din_sram(&desc[idx], larval_addr,
744 				     ctx->inter_digestsize);
745 			set_flow_mode(&desc[idx], S_DIN_to_HASH);
746 			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
747 			idx++;
748 
749 			/* Load the hash current length*/
750 			hw_desc_init(&desc[idx]);
751 			set_cipher_mode(&desc[idx], ctx->hw_mode);
752 			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
753 			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
754 			set_flow_mode(&desc[idx], S_DIN_to_HASH);
755 			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
756 			idx++;
757 
758 			hw_desc_init(&desc[idx]);
759 			set_din_type(&desc[idx], DMA_DLLI,
760 				     ctx->key_params.key_dma_addr, keylen,
761 				     NS_BIT);
762 			set_flow_mode(&desc[idx], DIN_HASH);
763 			idx++;
764 
765 			/* Get hashed key */
766 			hw_desc_init(&desc[idx]);
767 			set_cipher_mode(&desc[idx], ctx->hw_mode);
768 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
769 				      digestsize, NS_BIT, 0);
770 			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
771 			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
772 			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
773 			cc_set_endianity(ctx->hash_mode, &desc[idx]);
774 			idx++;
775 
776 			hw_desc_init(&desc[idx]);
777 			set_din_const(&desc[idx], 0, (blocksize - digestsize));
778 			set_flow_mode(&desc[idx], BYPASS);
779 			set_dout_dlli(&desc[idx],
780 				      (ctx->opad_tmp_keys_dma_addr +
781 				       digestsize),
782 				      (blocksize - digestsize), NS_BIT, 0);
783 			idx++;
784 		} else {
785 			hw_desc_init(&desc[idx]);
786 			set_din_type(&desc[idx], DMA_DLLI,
787 				     ctx->key_params.key_dma_addr, keylen,
788 				     NS_BIT);
789 			set_flow_mode(&desc[idx], BYPASS);
790 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
791 				      keylen, NS_BIT, 0);
792 			idx++;
793 
794 			if ((blocksize - keylen)) {
795 				hw_desc_init(&desc[idx]);
796 				set_din_const(&desc[idx], 0,
797 					      (blocksize - keylen));
798 				set_flow_mode(&desc[idx], BYPASS);
799 				set_dout_dlli(&desc[idx],
800 					      (ctx->opad_tmp_keys_dma_addr +
801 					       keylen), (blocksize - keylen),
802 					      NS_BIT, 0);
803 				idx++;
804 			}
805 		}
806 	} else {
807 		hw_desc_init(&desc[idx]);
808 		set_din_const(&desc[idx], 0, blocksize);
809 		set_flow_mode(&desc[idx], BYPASS);
810 		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
811 			      blocksize, NS_BIT, 0);
812 		idx++;
813 	}
814 
815 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
816 	if (rc) {
817 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
818 		goto out;
819 	}
820 
821 	/* calc derived HMAC key */
822 	for (idx = 0, i = 0; i < 2; i++) {
823 		/* Load hash initial state */
824 		hw_desc_init(&desc[idx]);
825 		set_cipher_mode(&desc[idx], ctx->hw_mode);
826 		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
827 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
828 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
829 		idx++;
830 
831 		/* Load the hash current length*/
832 		hw_desc_init(&desc[idx]);
833 		set_cipher_mode(&desc[idx], ctx->hw_mode);
834 		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
835 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
836 		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
837 		idx++;
838 
839 		/* Prepare ipad key */
840 		hw_desc_init(&desc[idx]);
841 		set_xor_val(&desc[idx], hmac_pad_const[i]);
842 		set_cipher_mode(&desc[idx], ctx->hw_mode);
843 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
844 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
845 		idx++;
846 
847 		/* Perform HASH update */
848 		hw_desc_init(&desc[idx]);
849 		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
850 			     blocksize, NS_BIT);
851 		set_cipher_mode(&desc[idx], ctx->hw_mode);
852 		set_xor_active(&desc[idx]);
853 		set_flow_mode(&desc[idx], DIN_HASH);
854 		idx++;
855 
856 		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
857 		 * of the first HASH "update" state)
858 		 */
859 		hw_desc_init(&desc[idx]);
860 		set_cipher_mode(&desc[idx], ctx->hw_mode);
861 		if (i > 0) /* Not first iteration */
862 			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
863 				      ctx->inter_digestsize, NS_BIT, 0);
864 		else /* First iteration */
865 			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
866 				      ctx->inter_digestsize, NS_BIT, 0);
867 		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
868 		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
869 		idx++;
870 	}
871 
872 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
873 
874 out:
875 	if (rc)
876 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
877 
878 	if (ctx->key_params.key_dma_addr) {
879 		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
880 				 ctx->key_params.keylen, DMA_TO_DEVICE);
881 		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
882 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
883 	}
884 	return rc;
885 }
886 
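/*
 * AES-XCBC-MAC setkey: following the RFC 3566 scheme, the three subkeys are
 * derived by ECB-encrypting fixed constants under the user key,
 *
 *	K1 = AES-K(0x01..01), K2 = AES-K(0x02..02), K3 = AES-K(0x03..03),
 *
 * and stored at XCBC_MAC_K1/K2/K3_OFFSET inside opad_tmp_keys_buff.
 */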
887 static int cc_xcbc_setkey(struct crypto_ahash *ahash,
888 			  const u8 *key, unsigned int keylen)
889 {
890 	struct cc_crypto_req cc_req = {};
891 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
892 	struct device *dev = drvdata_to_dev(ctx->drvdata);
893 	int rc = 0;
894 	unsigned int idx = 0;
895 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
896 
897 	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
898 
899 	switch (keylen) {
900 	case AES_KEYSIZE_128:
901 	case AES_KEYSIZE_192:
902 	case AES_KEYSIZE_256:
903 		break;
904 	default:
905 		return -EINVAL;
906 	}
907 
908 	ctx->key_params.keylen = keylen;
909 
910 	ctx->key_params.key_dma_addr =
911 		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
912 	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
913 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
914 			key, keylen);
915 		return -ENOMEM;
916 	}
917 	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
918 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
919 
920 	ctx->is_hmac = true;
921 	/* 1. Load the AES key */
922 	hw_desc_init(&desc[idx]);
923 	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
924 		     keylen, NS_BIT);
925 	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
926 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
927 	set_key_size_aes(&desc[idx], keylen);
928 	set_flow_mode(&desc[idx], S_DIN_to_AES);
929 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
930 	idx++;
931 
932 	hw_desc_init(&desc[idx]);
933 	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
934 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
935 	set_dout_dlli(&desc[idx],
936 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
937 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
938 	idx++;
939 
940 	hw_desc_init(&desc[idx]);
941 	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
942 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
943 	set_dout_dlli(&desc[idx],
944 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
945 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
946 	idx++;
947 
948 	hw_desc_init(&desc[idx]);
949 	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
950 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
951 	set_dout_dlli(&desc[idx],
952 		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
953 		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
954 	idx++;
955 
956 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
957 
958 	if (rc)
959 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
960 
961 	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
962 			 ctx->key_params.keylen, DMA_TO_DEVICE);
963 	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
964 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
965 
966 	return rc;
967 }
968 
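/*
 * AES-CMAC setkey only validates the key length and copies the key into
 * opad_tmp_keys_buff (a 192-bit key is zero-padded up to the maximum key
 * size); the CMAC subkey derivation is presumably done by the hardware when
 * the key is loaded.
 */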
969 static int cc_cmac_setkey(struct crypto_ahash *ahash,
970 			  const u8 *key, unsigned int keylen)
971 {
972 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
973 	struct device *dev = drvdata_to_dev(ctx->drvdata);
974 
975 	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
976 
977 	ctx->is_hmac = true;
978 
979 	switch (keylen) {
980 	case AES_KEYSIZE_128:
981 	case AES_KEYSIZE_192:
982 	case AES_KEYSIZE_256:
983 		break;
984 	default:
985 		return -EINVAL;
986 	}
987 
988 	ctx->key_params.keylen = keylen;
989 
990 	/* STAT_PHASE_1: Copy key to ctx */
991 
992 	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
993 				keylen, DMA_TO_DEVICE);
994 
995 	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
996 	if (keylen == 24) {
997 		memset(ctx->opad_tmp_keys_buff + 24, 0,
998 		       CC_AES_KEY_SIZE_MAX - 24);
999 	}
1000 
1001 	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1002 				   keylen, DMA_TO_DEVICE);
1003 
1005 
1006 	return 0;
1007 }
1008 
1009 static void cc_free_ctx(struct cc_hash_ctx *ctx)
1010 {
1011 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1012 
1013 	if (ctx->digest_buff_dma_addr) {
1014 		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1015 				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1016 		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1017 			&ctx->digest_buff_dma_addr);
1018 		ctx->digest_buff_dma_addr = 0;
1019 	}
1020 	if (ctx->opad_tmp_keys_dma_addr) {
1021 		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1022 				 sizeof(ctx->opad_tmp_keys_buff),
1023 				 DMA_BIDIRECTIONAL);
1024 		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1025 			&ctx->opad_tmp_keys_dma_addr);
1026 		ctx->opad_tmp_keys_dma_addr = 0;
1027 	}
1028 
1029 	ctx->key_params.keylen = 0;
1030 }
1031 
1032 static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1033 {
1034 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1035 
1036 	ctx->key_params.keylen = 0;
1037 
1038 	ctx->digest_buff_dma_addr =
1039 		dma_map_single(dev, (void *)ctx->digest_buff,
1040 			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1041 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1042 		dev_err(dev, "Mapping digest %zu B at va=%pK for DMA failed\n",
1043 			sizeof(ctx->digest_buff), ctx->digest_buff);
1044 		goto fail;
1045 	}
1046 	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1047 		sizeof(ctx->digest_buff), ctx->digest_buff,
1048 		&ctx->digest_buff_dma_addr);
1049 
1050 	ctx->opad_tmp_keys_dma_addr =
1051 		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1052 			       sizeof(ctx->opad_tmp_keys_buff),
1053 			       DMA_BIDIRECTIONAL);
1054 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1055 		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1056 			sizeof(ctx->opad_tmp_keys_buff),
1057 			ctx->opad_tmp_keys_buff);
1058 		goto fail;
1059 	}
1060 	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1061 		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1062 		&ctx->opad_tmp_keys_dma_addr);
1063 
1064 	ctx->is_hmac = false;
1065 	return 0;
1066 
1067 fail:
1068 	cc_free_ctx(ctx);
1069 	return -ENOMEM;
1070 }
1071 
1072 static int cc_cra_init(struct crypto_tfm *tfm)
1073 {
1074 	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1075 	struct hash_alg_common *hash_alg_common =
1076 		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1077 	struct ahash_alg *ahash_alg =
1078 		container_of(hash_alg_common, struct ahash_alg, halg);
1079 	struct cc_hash_alg *cc_alg =
1080 			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1081 
1082 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1083 				 sizeof(struct ahash_req_ctx));
1084 
1085 	ctx->hash_mode = cc_alg->hash_mode;
1086 	ctx->hw_mode = cc_alg->hw_mode;
1087 	ctx->inter_digestsize = cc_alg->inter_digestsize;
1088 	ctx->drvdata = cc_alg->drvdata;
1089 
1090 	return cc_alloc_ctx(ctx);
1091 }
1092 
1093 static void cc_cra_exit(struct crypto_tfm *tfm)
1094 {
1095 	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1096 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1097 
1098 	dev_dbg(dev, "cc_cra_exit\n");
1099 	cc_free_ctx(ctx);
1100 }
1101 
1102 static int cc_mac_update(struct ahash_request *req)
1103 {
1104 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1105 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1106 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1107 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1108 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1109 	struct cc_crypto_req cc_req = {};
1110 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1111 	int rc;
1112 	u32 idx = 0;
1113 	gfp_t flags = cc_gfp_flags(&req->base);
1114 
1115 	if (req->nbytes == 0) {
1116 		/* no real updates required */
1117 		return 0;
1118 	}
1119 
1120 	state->xcbc_count++;
1121 
1122 	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1123 					req->nbytes, block_size, flags);
1124 	if (rc) {
1125 		if (rc == 1) {
1126 			dev_dbg(dev, "data size does not require HW update %x\n",
1127 				req->nbytes);
1128 			/* No hardware updates are required */
1129 			return 0;
1130 		}
1131 		dev_err(dev, "map_ahash_request_update() failed\n");
1132 		return -ENOMEM;
1133 	}
1134 
1135 	if (cc_map_req(dev, state, ctx)) {
1136 		dev_err(dev, "map_ahash_source() failed\n");
1137 		return -EINVAL;
1138 	}
1139 
1140 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1141 		cc_setup_xcbc(req, desc, &idx);
1142 	else
1143 		cc_setup_cmac(req, desc, &idx);
1144 
1145 	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1146 
1147 	/* store the hash digest result in context */
1148 	hw_desc_init(&desc[idx]);
1149 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1150 	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1151 		      ctx->inter_digestsize, NS_BIT, 1);
1152 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1153 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1154 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1155 	idx++;
1156 
1157 	/* Setup request structure */
1158 	cc_req.user_cb = cc_update_complete;
1159 	cc_req.user_arg = req;
1160 
1161 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1162 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1163 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1164 		cc_unmap_hash_request(dev, state, req->src, true);
1165 		cc_unmap_req(dev, state, ctx);
1166 	}
1167 	return rc;
1168 }
1169 
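/*
 * XCBC/CMAC finalization. If earlier updates ended exactly on a block
 * boundary (rem_cnt == 0), the stored MAC state is first ECB-decrypted with
 * the key kept at XCBC_MAC_K1_OFFSET, recovering the "block_state XOR M[n]"
 * value noted in the comment below so that the last block can be rerun with
 * the hardware's final-block handling; otherwise the buffered remainder (or
 * a size-0 operation for an empty message) is processed before the final MAC
 * is written out.
 */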
1170 static int cc_mac_final(struct ahash_request *req)
1171 {
1172 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1173 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1174 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1175 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1176 	struct cc_crypto_req cc_req = {};
1177 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1178 	int idx = 0;
1179 	int rc = 0;
1180 	u32 key_size, key_len;
1181 	u32 digestsize = crypto_ahash_digestsize(tfm);
1182 	gfp_t flags = cc_gfp_flags(&req->base);
1183 	u32 rem_cnt = *cc_hash_buf_cnt(state);
1184 
1185 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1186 		key_size = CC_AES_128_BIT_KEY_SIZE;
1187 		key_len  = CC_AES_128_BIT_KEY_SIZE;
1188 	} else {
1189 		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1190 			ctx->key_params.keylen;
1191 		key_len =  ctx->key_params.keylen;
1192 	}
1193 	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1194 	dev_dbg(dev, "===== final  xcbc reminder (%d) ====\n", rem_cnt);
1195 
1196 	if (cc_map_req(dev, state, ctx)) {
1197 		dev_err(dev, "map_ahash_source() failed\n");
1198 		return -EINVAL;
1199 	}
1200 
1201 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1202 				      req->nbytes, 0, flags)) {
1203 		dev_err(dev, "map_ahash_request_final() failed\n");
1204 		cc_unmap_req(dev, state, ctx);
1205 		return -ENOMEM;
1206 	}
1207 
1208 	if (cc_map_result(dev, state, digestsize)) {
1209 		dev_err(dev, "map_ahash_digest() failed\n");
1210 		cc_unmap_hash_request(dev, state, req->src, true);
1211 		cc_unmap_req(dev, state, ctx);
1212 		return -ENOMEM;
1213 	}
1214 
1215 	/* Setup request structure */
1216 	cc_req.user_cb = cc_hash_complete;
1217 	cc_req.user_arg = req;
1218 
1219 	if (state->xcbc_count && rem_cnt == 0) {
1220 		/* Load key for ECB decryption */
1221 		hw_desc_init(&desc[idx]);
1222 		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1223 		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1224 		set_din_type(&desc[idx], DMA_DLLI,
1225 			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1226 			     key_size, NS_BIT);
1227 		set_key_size_aes(&desc[idx], key_len);
1228 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1229 		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1230 		idx++;
1231 
1232 		/* Initiate decryption of block state to previous
1233 		 * block_state-XOR-M[n]
1234 		 */
1235 		hw_desc_init(&desc[idx]);
1236 		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1237 			     CC_AES_BLOCK_SIZE, NS_BIT);
1238 		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1239 			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1240 		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1241 		idx++;
1242 
1243 		/* Memory Barrier: wait for axi write to complete */
1244 		hw_desc_init(&desc[idx]);
1245 		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1246 		set_dout_no_dma(&desc[idx], 0, 0, 1);
1247 		idx++;
1248 	}
1249 
1250 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1251 		cc_setup_xcbc(req, desc, &idx);
1252 	else
1253 		cc_setup_cmac(req, desc, &idx);
1254 
1255 	if (state->xcbc_count == 0) {
1256 		hw_desc_init(&desc[idx]);
1257 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1258 		set_key_size_aes(&desc[idx], key_len);
1259 		set_cmac_size0_mode(&desc[idx]);
1260 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1261 		idx++;
1262 	} else if (rem_cnt > 0) {
1263 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1264 	} else {
1265 		hw_desc_init(&desc[idx]);
1266 		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1267 		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1268 		idx++;
1269 	}
1270 
1271 	/* Get final MAC result */
1272 	hw_desc_init(&desc[idx]);
1273 	/* TODO */
1274 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1275 		      digestsize, NS_BIT, 1);
1276 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1277 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1278 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1279 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1280 	idx++;
1281 
1282 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1283 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1284 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1285 		cc_unmap_hash_request(dev, state, req->src, true);
1286 		cc_unmap_result(dev, state, digestsize, req->result);
1287 		cc_unmap_req(dev, state, ctx);
1288 	}
1289 	return rc;
1290 }
1291 
1292 static int cc_mac_finup(struct ahash_request *req)
1293 {
1294 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1295 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1296 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1297 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1298 	struct cc_crypto_req cc_req = {};
1299 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1300 	int idx = 0;
1301 	int rc = 0;
1302 	u32 key_len = 0;
1303 	u32 digestsize = crypto_ahash_digestsize(tfm);
1304 	gfp_t flags = cc_gfp_flags(&req->base);
1305 
1306 	dev_dbg(dev, "===== finup xcbc (%d) ====\n", req->nbytes);
1307 	if (state->xcbc_count > 0 && req->nbytes == 0) {
1308 		dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
1309 		return cc_mac_final(req);
1310 	}
1311 
1312 	if (cc_map_req(dev, state, ctx)) {
1313 		dev_err(dev, "map_ahash_source() failed\n");
1314 		return -EINVAL;
1315 	}
1316 
1317 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1318 				      req->nbytes, 1, flags)) {
1319 		dev_err(dev, "map_ahash_request_final() failed\n");
1320 		cc_unmap_req(dev, state, ctx);
1321 		return -ENOMEM;
1322 	}
1323 	if (cc_map_result(dev, state, digestsize)) {
1324 		dev_err(dev, "map_ahash_digest() failed\n");
1325 		cc_unmap_hash_request(dev, state, req->src, true);
1326 		cc_unmap_req(dev, state, ctx);
1327 		return -ENOMEM;
1328 	}
1329 
1330 	/* Setup request structure */
1331 	cc_req.user_cb = cc_hash_complete;
1332 	cc_req.user_arg = req;
1333 
1334 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1335 		key_len = CC_AES_128_BIT_KEY_SIZE;
1336 		cc_setup_xcbc(req, desc, &idx);
1337 	} else {
1338 		key_len = ctx->key_params.keylen;
1339 		cc_setup_cmac(req, desc, &idx);
1340 	}
1341 
1342 	if (req->nbytes == 0) {
1343 		hw_desc_init(&desc[idx]);
1344 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1345 		set_key_size_aes(&desc[idx], key_len);
1346 		set_cmac_size0_mode(&desc[idx]);
1347 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1348 		idx++;
1349 	} else {
1350 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1351 	}
1352 
1353 	/* Get final MAC result */
1354 	hw_desc_init(&desc[idx]);
1355 	/* TODO */
1356 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1357 		      digestsize, NS_BIT, 1);
1358 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1359 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1360 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1361 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1362 	idx++;
1363 
1364 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1365 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1366 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1367 		cc_unmap_hash_request(dev, state, req->src, true);
1368 		cc_unmap_result(dev, state, digestsize, req->result);
1369 		cc_unmap_req(dev, state, ctx);
1370 	}
1371 	return rc;
1372 }
1373 
1374 static int cc_mac_digest(struct ahash_request *req)
1375 {
1376 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1377 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1378 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1379 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1380 	u32 digestsize = crypto_ahash_digestsize(tfm);
1381 	struct cc_crypto_req cc_req = {};
1382 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1383 	u32 key_len;
1384 	unsigned int idx = 0;
1385 	int rc;
1386 	gfp_t flags = cc_gfp_flags(&req->base);
1387 
1388 	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);
1389 
1390 	cc_init_req(dev, state, ctx);
1391 
1392 	if (cc_map_req(dev, state, ctx)) {
1393 		dev_err(dev, "map_ahash_source() failed\n");
1394 		return -ENOMEM;
1395 	}
1396 	if (cc_map_result(dev, state, digestsize)) {
1397 		dev_err(dev, "map_ahash_digest() failed\n");
1398 		cc_unmap_req(dev, state, ctx);
1399 		return -ENOMEM;
1400 	}
1401 
1402 	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1403 				      req->nbytes, 1, flags)) {
1404 		dev_err(dev, "map_ahash_request_final() failed\n");
1405 		cc_unmap_req(dev, state, ctx);
1406 		return -ENOMEM;
1407 	}
1408 
1409 	/* Setup request structure */
1410 	cc_req.user_cb = cc_digest_complete;
1411 	cc_req.user_arg = req;
1412 
1413 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1414 		key_len = CC_AES_128_BIT_KEY_SIZE;
1415 		cc_setup_xcbc(req, desc, &idx);
1416 	} else {
1417 		key_len = ctx->key_params.keylen;
1418 		cc_setup_cmac(req, desc, &idx);
1419 	}
1420 
1421 	if (req->nbytes == 0) {
1422 		hw_desc_init(&desc[idx]);
1423 		set_cipher_mode(&desc[idx], ctx->hw_mode);
1424 		set_key_size_aes(&desc[idx], key_len);
1425 		set_cmac_size0_mode(&desc[idx]);
1426 		set_flow_mode(&desc[idx], S_DIN_to_AES);
1427 		idx++;
1428 	} else {
1429 		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1430 	}
1431 
1432 	/* Get final MAC result */
1433 	hw_desc_init(&desc[idx]);
1434 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1435 		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1436 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1437 	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1438 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1439 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1440 	set_cipher_mode(&desc[idx], ctx->hw_mode);
1441 	idx++;
1442 
1443 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1444 	if (rc != -EINPROGRESS && rc != -EBUSY) {
1445 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1446 		cc_unmap_hash_request(dev, state, req->src, true);
1447 		cc_unmap_result(dev, state, digestsize, req->result);
1448 		cc_unmap_req(dev, state, ctx);
1449 	}
1450 	return rc;
1451 }
1452 
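/*
 * Partial-state export/import. The exported blob is bounded by
 * CC_STATE_SIZE() and is laid out as:
 *
 *	u32 magic			(CC_EXPORT_MAGIC)
 *	u8  digest[inter_digestsize]
 *	u8  digest_bytes_len[hash_len_sz]
 *	u32 buf_cnt
 *	u8  buf[buf_cnt]		(unprocessed tail, at most
 *					 CC_MAX_HASH_BLCK_SIZE bytes)
 */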
1453 static int cc_hash_export(struct ahash_request *req, void *out)
1454 {
1455 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1456 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1457 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1458 	u8 *curr_buff = cc_hash_buf(state);
1459 	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1460 	const u32 tmp = CC_EXPORT_MAGIC;
1461 
1462 	memcpy(out, &tmp, sizeof(u32));
1463 	out += sizeof(u32);
1464 
1465 	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1466 	out += ctx->inter_digestsize;
1467 
1468 	memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
1469 	out += ctx->drvdata->hash_len_sz;
1470 
1471 	memcpy(out, &curr_buff_cnt, sizeof(u32));
1472 	out += sizeof(u32);
1473 
1474 	memcpy(out, curr_buff, curr_buff_cnt);
1475 
1476 	return 0;
1477 }
1478 
1479 static int cc_hash_import(struct ahash_request *req, const void *in)
1480 {
1481 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1482 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1483 	struct device *dev = drvdata_to_dev(ctx->drvdata);
1484 	struct ahash_req_ctx *state = ahash_request_ctx(req);
1485 	u32 tmp;
1486 
1487 	memcpy(&tmp, in, sizeof(u32));
1488 	if (tmp != CC_EXPORT_MAGIC)
1489 		return -EINVAL;
1490 	in += sizeof(u32);
1491 
1492 	cc_init_req(dev, state, ctx);
1493 
1494 	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1495 	in += ctx->inter_digestsize;
1496 
1497 	memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
1498 	in += ctx->drvdata->hash_len_sz;
1499 
1500 	/* Sanity check the data as much as possible */
1501 	memcpy(&tmp, in, sizeof(u32));
1502 	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1503 		return -EINVAL;
1504 	in += sizeof(u32);
1505 
1506 	state->buf_cnt[0] = tmp;
1507 	memcpy(state->buffers[0], in, tmp);
1508 
1509 	return 0;
1510 }
1511 
1512 struct cc_hash_template {
1513 	char name[CRYPTO_MAX_ALG_NAME];
1514 	char driver_name[CRYPTO_MAX_ALG_NAME];
1515 	char mac_name[CRYPTO_MAX_ALG_NAME];
1516 	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1517 	unsigned int blocksize;
1518 	bool synchronize;
1519 	struct ahash_alg template_ahash;
1520 	int hash_mode;
1521 	int hw_mode;
1522 	int inter_digestsize;
1523 	struct cc_drvdata *drvdata;
1524 	u32 min_hw_rev;
1525 };
1526 
1527 #define CC_STATE_SIZE(_x) \
1528 	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1529 
1530 /* hash descriptors */
1531 static struct cc_hash_template driver_hash[] = {
1532 	// Asynchronous hash templates
1533 	{
1534 		.name = "sha1",
1535 		.driver_name = "sha1-ccree",
1536 		.mac_name = "hmac(sha1)",
1537 		.mac_driver_name = "hmac-sha1-ccree",
1538 		.blocksize = SHA1_BLOCK_SIZE,
1539 		.synchronize = false,
1540 		.template_ahash = {
1541 			.init = cc_hash_init,
1542 			.update = cc_hash_update,
1543 			.final = cc_hash_final,
1544 			.finup = cc_hash_finup,
1545 			.digest = cc_hash_digest,
1546 			.export = cc_hash_export,
1547 			.import = cc_hash_import,
1548 			.setkey = cc_hash_setkey,
1549 			.halg = {
1550 				.digestsize = SHA1_DIGEST_SIZE,
1551 				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1552 			},
1553 		},
1554 		.hash_mode = DRV_HASH_SHA1,
1555 		.hw_mode = DRV_HASH_HW_SHA1,
1556 		.inter_digestsize = SHA1_DIGEST_SIZE,
1557 		.min_hw_rev = CC_HW_REV_630,
1558 	},
1559 	{
1560 		.name = "sha256",
1561 		.driver_name = "sha256-ccree",
1562 		.mac_name = "hmac(sha256)",
1563 		.mac_driver_name = "hmac-sha256-ccree",
1564 		.blocksize = SHA256_BLOCK_SIZE,
1565 		.template_ahash = {
1566 			.init = cc_hash_init,
1567 			.update = cc_hash_update,
1568 			.final = cc_hash_final,
1569 			.finup = cc_hash_finup,
1570 			.digest = cc_hash_digest,
1571 			.export = cc_hash_export,
1572 			.import = cc_hash_import,
1573 			.setkey = cc_hash_setkey,
1574 			.halg = {
1575 				.digestsize = SHA256_DIGEST_SIZE,
1576 				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1577 			},
1578 		},
1579 		.hash_mode = DRV_HASH_SHA256,
1580 		.hw_mode = DRV_HASH_HW_SHA256,
1581 		.inter_digestsize = SHA256_DIGEST_SIZE,
1582 		.min_hw_rev = CC_HW_REV_630,
1583 	},
1584 	{
1585 		.name = "sha224",
1586 		.driver_name = "sha224-ccree",
1587 		.mac_name = "hmac(sha224)",
1588 		.mac_driver_name = "hmac-sha224-ccree",
1589 		.blocksize = SHA224_BLOCK_SIZE,
1590 		.template_ahash = {
1591 			.init = cc_hash_init,
1592 			.update = cc_hash_update,
1593 			.final = cc_hash_final,
1594 			.finup = cc_hash_finup,
1595 			.digest = cc_hash_digest,
1596 			.export = cc_hash_export,
1597 			.import = cc_hash_import,
1598 			.setkey = cc_hash_setkey,
1599 			.halg = {
1600 				.digestsize = SHA224_DIGEST_SIZE,
1601 				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1602 			},
1603 		},
1604 		.hash_mode = DRV_HASH_SHA224,
1605 		.hw_mode = DRV_HASH_HW_SHA256,
1606 		.inter_digestsize = SHA256_DIGEST_SIZE,
1607 		.min_hw_rev = CC_HW_REV_630,
1608 	},
1609 	{
1610 		.name = "sha384",
1611 		.driver_name = "sha384-ccree",
1612 		.mac_name = "hmac(sha384)",
1613 		.mac_driver_name = "hmac-sha384-ccree",
1614 		.blocksize = SHA384_BLOCK_SIZE,
1615 		.template_ahash = {
1616 			.init = cc_hash_init,
1617 			.update = cc_hash_update,
1618 			.final = cc_hash_final,
1619 			.finup = cc_hash_finup,
1620 			.digest = cc_hash_digest,
1621 			.export = cc_hash_export,
1622 			.import = cc_hash_import,
1623 			.setkey = cc_hash_setkey,
1624 			.halg = {
1625 				.digestsize = SHA384_DIGEST_SIZE,
1626 				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1627 			},
1628 		},
1629 		.hash_mode = DRV_HASH_SHA384,
1630 		.hw_mode = DRV_HASH_HW_SHA512,
1631 		.inter_digestsize = SHA512_DIGEST_SIZE,
1632 		.min_hw_rev = CC_HW_REV_712,
1633 	},
1634 	{
1635 		.name = "sha512",
1636 		.driver_name = "sha512-ccree",
1637 		.mac_name = "hmac(sha512)",
1638 		.mac_driver_name = "hmac-sha512-ccree",
1639 		.blocksize = SHA512_BLOCK_SIZE,
1640 		.template_ahash = {
1641 			.init = cc_hash_init,
1642 			.update = cc_hash_update,
1643 			.final = cc_hash_final,
1644 			.finup = cc_hash_finup,
1645 			.digest = cc_hash_digest,
1646 			.export = cc_hash_export,
1647 			.import = cc_hash_import,
1648 			.setkey = cc_hash_setkey,
1649 			.halg = {
1650 				.digestsize = SHA512_DIGEST_SIZE,
1651 				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1652 			},
1653 		},
1654 		.hash_mode = DRV_HASH_SHA512,
1655 		.hw_mode = DRV_HASH_HW_SHA512,
1656 		.inter_digestsize = SHA512_DIGEST_SIZE,
1657 		.min_hw_rev = CC_HW_REV_712,
1658 	},
1659 	{
1660 		.name = "md5",
1661 		.driver_name = "md5-ccree",
1662 		.mac_name = "hmac(md5)",
1663 		.mac_driver_name = "hmac-md5-ccree",
1664 		.blocksize = MD5_HMAC_BLOCK_SIZE,
1665 		.template_ahash = {
1666 			.init = cc_hash_init,
1667 			.update = cc_hash_update,
1668 			.final = cc_hash_final,
1669 			.finup = cc_hash_finup,
1670 			.digest = cc_hash_digest,
1671 			.export = cc_hash_export,
1672 			.import = cc_hash_import,
1673 			.setkey = cc_hash_setkey,
1674 			.halg = {
1675 				.digestsize = MD5_DIGEST_SIZE,
1676 				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1677 			},
1678 		},
1679 		.hash_mode = DRV_HASH_MD5,
1680 		.hw_mode = DRV_HASH_HW_MD5,
1681 		.inter_digestsize = MD5_DIGEST_SIZE,
1682 		.min_hw_rev = CC_HW_REV_630,
1683 	},
1684 	{
1685 		.mac_name = "xcbc(aes)",
1686 		.mac_driver_name = "xcbc-aes-ccree",
1687 		.blocksize = AES_BLOCK_SIZE,
1688 		.template_ahash = {
1689 			.init = cc_hash_init,
1690 			.update = cc_mac_update,
1691 			.final = cc_mac_final,
1692 			.finup = cc_mac_finup,
1693 			.digest = cc_mac_digest,
1694 			.setkey = cc_xcbc_setkey,
1695 			.export = cc_hash_export,
1696 			.import = cc_hash_import,
1697 			.halg = {
1698 				.digestsize = AES_BLOCK_SIZE,
1699 				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1700 			},
1701 		},
1702 		.hash_mode = DRV_HASH_NULL,
1703 		.hw_mode = DRV_CIPHER_XCBC_MAC,
1704 		.inter_digestsize = AES_BLOCK_SIZE,
1705 		.min_hw_rev = CC_HW_REV_630,
1706 	},
1707 	{
1708 		.mac_name = "cmac(aes)",
1709 		.mac_driver_name = "cmac-aes-ccree",
1710 		.blocksize = AES_BLOCK_SIZE,
1711 		.template_ahash = {
1712 			.init = cc_hash_init,
1713 			.update = cc_mac_update,
1714 			.final = cc_mac_final,
1715 			.finup = cc_mac_finup,
1716 			.digest = cc_mac_digest,
1717 			.setkey = cc_cmac_setkey,
1718 			.export = cc_hash_export,
1719 			.import = cc_hash_import,
1720 			.halg = {
1721 				.digestsize = AES_BLOCK_SIZE,
1722 				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1723 			},
1724 		},
1725 		.hash_mode = DRV_HASH_NULL,
1726 		.hw_mode = DRV_CIPHER_CMAC,
1727 		.inter_digestsize = AES_BLOCK_SIZE,
1728 		.min_hw_rev = CC_HW_REV_630,
1729 	},
1730 };
1731 
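/*
 * Build a cc_hash_alg instance from a driver template. For keyed algorithms
 * (HMAC/XCBC/CMAC) the mac_name/mac_driver_name pair is used and the
 * template's setkey handler is kept; otherwise the plain hash names are used
 * and setkey is cleared.
 */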
1732 static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1733 					     struct device *dev, bool keyed)
1734 {
1735 	struct cc_hash_alg *t_crypto_alg;
1736 	struct crypto_alg *alg;
1737 	struct ahash_alg *halg;
1738 
1739 	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1740 	if (!t_crypto_alg)
1741 		return ERR_PTR(-ENOMEM);
1742 
1743 	t_crypto_alg->ahash_alg = template->template_ahash;
1744 	halg = &t_crypto_alg->ahash_alg;
1745 	alg = &halg->halg.base;
1746 
1747 	if (keyed) {
1748 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1749 			 template->mac_name);
1750 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1751 			 template->mac_driver_name);
1752 	} else {
1753 		halg->setkey = NULL;
1754 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1755 			 template->name);
1756 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1757 			 template->driver_name);
1758 	}
1759 	alg->cra_module = THIS_MODULE;
1760 	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1761 	alg->cra_priority = CC_CRA_PRIO;
1762 	alg->cra_blocksize = template->blocksize;
1763 	alg->cra_alignmask = 0;
1764 	alg->cra_exit = cc_cra_exit;
1765 
1766 	alg->cra_init = cc_cra_init;
1767 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1768 
1769 	t_crypto_alg->hash_mode = template->hash_mode;
1770 	t_crypto_alg->hw_mode = template->hw_mode;
1771 	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1772 
1773 	return t_crypto_alg;
1774 }
1775 
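/*
 * Program the hash constants into SRAM. The resulting layout, starting at
 * digest_len_sram_addr, is:
 *
 *   digest_len_init
 *   digest_len_sha512_init    (HW rev >= 712 only)
 *   md5_init                  <- larval_digest_sram_addr
 *   sha1_init
 *   sha224_init
 *   sha256_init
 *   sha384_init               (HW rev >= 712 only)
 *   sha512_init               (HW rev >= 712 only)
 *
 * cc_larval_digest_addr() and cc_digest_len_addr() rely on this ordering
 * when computing SRAM offsets.
 */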
1776 int cc_init_hash_sram(struct cc_drvdata *drvdata)
1777 {
1778 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1779 	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1780 	unsigned int larval_seq_len = 0;
1781 	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1782 	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1783 	int rc = 0;
1784 
1785 	/* Copy-to-sram digest-len */
1786 	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1787 			 ARRAY_SIZE(digest_len_init), larval_seq,
1788 			 &larval_seq_len);
1789 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1790 	if (rc)
1791 		goto init_digest_const_err;
1792 
1793 	sram_buff_ofs += sizeof(digest_len_init);
1794 	larval_seq_len = 0;
1795 
1796 	if (large_sha_supported) {
1797 		/* Copy-to-sram digest-len for sha384/512 */
1798 		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1799 				 ARRAY_SIZE(digest_len_sha512_init),
1800 				 larval_seq, &larval_seq_len);
1801 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1802 		if (rc)
1803 			goto init_digest_const_err;
1804 
1805 		sram_buff_ofs += sizeof(digest_len_sha512_init);
1806 		larval_seq_len = 0;
1807 	}
1808 
1809 	/* The initial digests offset */
1810 	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1811 
1812 	/* Copy-to-sram initial SHA* digests */
1813 	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1814 			 larval_seq, &larval_seq_len);
1815 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1816 	if (rc)
1817 		goto init_digest_const_err;
1818 	sram_buff_ofs += sizeof(md5_init);
1819 	larval_seq_len = 0;
1820 
1821 	cc_set_sram_desc(sha1_init, sram_buff_ofs,
1822 			 ARRAY_SIZE(sha1_init), larval_seq,
1823 			 &larval_seq_len);
1824 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1825 	if (rc)
1826 		goto init_digest_const_err;
1827 	sram_buff_ofs += sizeof(sha1_init);
1828 	larval_seq_len = 0;
1829 
1830 	cc_set_sram_desc(sha224_init, sram_buff_ofs,
1831 			 ARRAY_SIZE(sha224_init), larval_seq,
1832 			 &larval_seq_len);
1833 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1834 	if (rc)
1835 		goto init_digest_const_err;
1836 	sram_buff_ofs += sizeof(sha224_init);
1837 	larval_seq_len = 0;
1838 
1839 	cc_set_sram_desc(sha256_init, sram_buff_ofs,
1840 			 ARRAY_SIZE(sha256_init), larval_seq,
1841 			 &larval_seq_len);
1842 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1843 	if (rc)
1844 		goto init_digest_const_err;
1845 	sram_buff_ofs += sizeof(sha256_init);
1846 	larval_seq_len = 0;
1847 
1848 	if (large_sha_supported) {
1849 		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1850 				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1851 				 &larval_seq_len);
1852 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1853 		if (rc)
1854 			goto init_digest_const_err;
1855 		sram_buff_ofs += sizeof(sha384_init);
1856 		larval_seq_len = 0;
1857 
1858 		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1859 				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1860 				 &larval_seq_len);
1861 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1862 		if (rc)
1863 			goto init_digest_const_err;
1864 	}
1865 
1866 init_digest_const_err:
1867 	return rc;
1868 }
1869 
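/*
 * Swap each pair of adjacent 32-bit words in place. @size is the buffer
 * length in 32-bit words and is expected to be even.
 */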
1870 static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1871 {
1872 	int i;
1873 	u32 tmp;
1874 
1875 	for (i = 0; i < size; i += 2) {
1876 		tmp = buf[i];
1877 		buf[i] = buf[i + 1];
1878 		buf[i + 1] = tmp;
1879 	}
1880 }
1881 
1882 /*
1883  * Due to the way the HW works we need to swap every
1884  * double word in the SHA384 and SHA512 larval hashes
1885  */
1886 void __init cc_hash_global_init(void)
1887 {
1888 	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1889 	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1890 }
1891 
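/*
 * Allocate the SRAM area for the digest-length and larval-digest constants,
 * program it via cc_init_hash_sram(), and register every hash/HMAC/MAC
 * algorithm supported by the detected HW revision.
 */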
1892 int cc_hash_alloc(struct cc_drvdata *drvdata)
1893 {
1894 	struct cc_hash_handle *hash_handle;
1895 	cc_sram_addr_t sram_buff;
1896 	u32 sram_size_to_alloc;
1897 	struct device *dev = drvdata_to_dev(drvdata);
1898 	int rc = 0;
1899 	int alg;
1900 
1901 	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1902 	if (!hash_handle)
1903 		return -ENOMEM;
1904 
1905 	INIT_LIST_HEAD(&hash_handle->hash_list);
1906 	drvdata->hash_handle = hash_handle;
1907 
1908 	sram_size_to_alloc = sizeof(digest_len_init) +
1909 			sizeof(md5_init) +
1910 			sizeof(sha1_init) +
1911 			sizeof(sha224_init) +
1912 			sizeof(sha256_init);
1913 
1914 	if (drvdata->hw_rev >= CC_HW_REV_712)
1915 		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1916 			sizeof(sha384_init) + sizeof(sha512_init);
1917 
1918 	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1919 	if (sram_buff == NULL_SRAM_ADDR) {
1920 		dev_err(dev, "SRAM pool exhausted\n");
1921 		rc = -ENOMEM;
1922 		goto fail;
1923 	}
1924 
1925 	/* The initial digest-len offset */
1926 	hash_handle->digest_len_sram_addr = sram_buff;
1927 
1928 	/* Must be set before the alg registration as it is used there */
1929 	rc = cc_init_hash_sram(drvdata);
1930 	if (rc) {
1931 		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1932 		goto fail;
1933 	}
1934 
1935 	/* ahash registration */
1936 	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1937 		struct cc_hash_alg *t_alg;
1938 		int hw_mode = driver_hash[alg].hw_mode;
1939 
1940 		/* We either support both HASH and MAC or none */
1941 		if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
1942 			continue;
1943 
1944 		/* register hmac version */
1945 		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
1946 		if (IS_ERR(t_alg)) {
1947 			rc = PTR_ERR(t_alg);
1948 			dev_err(dev, "%s alg allocation failed\n",
1949 				driver_hash[alg].driver_name);
1950 			goto fail;
1951 		}
1952 		t_alg->drvdata = drvdata;
1953 
1954 		rc = crypto_register_ahash(&t_alg->ahash_alg);
1955 		if (rc) {
1956 			dev_err(dev, "%s alg registration failed\n",
1957 				driver_hash[alg].driver_name);
1958 			kfree(t_alg);
1959 			goto fail;
1960 		}
1961 
1962 		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
1963 
1964 		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
1965 		    hw_mode == DRV_CIPHER_CMAC)
1966 			continue;
1967 
1968 		/* register hash version */
1969 		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
1970 		if (IS_ERR(t_alg)) {
1971 			rc = PTR_ERR(t_alg);
1972 			dev_err(dev, "%s alg allocation failed\n",
1973 				driver_hash[alg].driver_name);
1974 			goto fail;
1975 		}
1976 		t_alg->drvdata = drvdata;
1977 
1978 		rc = crypto_register_ahash(&t_alg->ahash_alg);
1979 		if (rc) {
1980 			dev_err(dev, "%s alg registration failed\n",
1981 				driver_hash[alg].driver_name);
1982 			kfree(t_alg);
1983 			goto fail;
1984 		} else {
1985 			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
1986 		}
1987 	}
1988 
1989 	return 0;
1990 
1991 fail:
1992 	kfree(drvdata->hash_handle);
1993 	drvdata->hash_handle = NULL;
1994 	return rc;
1995 }
1996 
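/* Unregister all algorithms registered by cc_hash_alloc() and free the handle */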
1997 int cc_hash_free(struct cc_drvdata *drvdata)
1998 {
1999 	struct cc_hash_alg *t_hash_alg, *hash_n;
2000 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2001 
2002 	if (hash_handle) {
2003 		list_for_each_entry_safe(t_hash_alg, hash_n,
2004 					 &hash_handle->hash_list, entry) {
2005 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2006 			list_del(&t_hash_alg->entry);
2007 			kfree(t_hash_alg);
2008 		}
2009 
2010 		kfree(hash_handle);
2011 		drvdata->hash_handle = NULL;
2012 	}
2013 	return 0;
2014 }
2015 
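/*
 * Load the XCBC-MAC derived keys and state into the AES engine: K1 into the
 * key register (SETUP_LOAD_KEY0), K2 and K3 into state registers 1 and 2,
 * and the current MAC state (digest buffer) into state register 0.
 */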
2016 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2017 			  unsigned int *seq_size)
2018 {
2019 	unsigned int idx = *seq_size;
2020 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2021 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2022 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2023 
2024 	/* Setup XCBC MAC K1 */
2025 	hw_desc_init(&desc[idx]);
2026 	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2027 					    XCBC_MAC_K1_OFFSET),
2028 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2029 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2030 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2031 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2032 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2033 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2034 	idx++;
2035 
2036 	/* Setup XCBC MAC K2 */
2037 	hw_desc_init(&desc[idx]);
2038 	set_din_type(&desc[idx], DMA_DLLI,
2039 		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2040 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2041 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2042 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2043 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2044 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2045 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2046 	idx++;
2047 
2048 	/* Setup XCBC MAC K3 */
2049 	hw_desc_init(&desc[idx]);
2050 	set_din_type(&desc[idx], DMA_DLLI,
2051 		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2052 		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2053 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2054 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2055 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2056 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2057 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2058 	idx++;
2059 
2060 	/* Loading MAC state */
2061 	hw_desc_init(&desc[idx]);
2062 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2063 		     CC_AES_BLOCK_SIZE, NS_BIT);
2064 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2065 	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2066 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2067 	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2068 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2069 	idx++;
2070 	*seq_size = idx;
2071 }
2072 
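/*
 * Load the CMAC key and the current MAC state into the AES engine. For
 * 192-bit keys the key DMA length is rounded up to AES_MAX_KEY_SIZE, while
 * set_key_size_aes() still carries the real key length.
 */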
2073 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2074 			  unsigned int *seq_size)
2075 {
2076 	unsigned int idx = *seq_size;
2077 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2078 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2079 	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2080 
2081 	/* Setup CMAC Key */
2082 	hw_desc_init(&desc[idx]);
2083 	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2084 		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2085 		      ctx->key_params.keylen), NS_BIT);
2086 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2087 	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2088 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2089 	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2090 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2091 	idx++;
2092 
2093 	/* Load MAC state */
2094 	hw_desc_init(&desc[idx]);
2095 	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2096 		     CC_AES_BLOCK_SIZE, NS_BIT);
2097 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2098 	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2099 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2100 	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2101 	set_flow_mode(&desc[idx], S_DIN_to_AES);
2102 	idx++;
2103 	*seq_size = idx;
2104 }
2105 
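/*
 * Append the data-processing descriptors for the current request: a single
 * DLLI descriptor when the data is contiguous, or a BYPASS copy of the MLLI
 * table into SRAM followed by an MLLI-driven descriptor otherwise. A NULL
 * data buffer adds nothing. When more data is expected, the last descriptor
 * is marked with the "not last" indication.
 */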
2106 static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2107 			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2108 			struct cc_hw_desc desc[], bool is_not_last_data,
2109 			unsigned int *seq_size)
2110 {
2111 	unsigned int idx = *seq_size;
2112 	struct device *dev = drvdata_to_dev(ctx->drvdata);
2113 
2114 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2115 		hw_desc_init(&desc[idx]);
2116 		set_din_type(&desc[idx], DMA_DLLI,
2117 			     sg_dma_address(areq_ctx->curr_sg),
2118 			     areq_ctx->curr_sg->length, NS_BIT);
2119 		set_flow_mode(&desc[idx], flow_mode);
2120 		idx++;
2121 	} else {
2122 		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2123 			dev_dbg(dev, "NULL mode\n");
2124 			/* nothing to build */
2125 			return;
2126 		}
2127 		/* bypass */
2128 		hw_desc_init(&desc[idx]);
2129 		set_din_type(&desc[idx], DMA_DLLI,
2130 			     areq_ctx->mlli_params.mlli_dma_addr,
2131 			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2132 		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2133 			      areq_ctx->mlli_params.mlli_len);
2134 		set_flow_mode(&desc[idx], BYPASS);
2135 		idx++;
2136 		/* process */
2137 		hw_desc_init(&desc[idx]);
2138 		set_din_type(&desc[idx], DMA_MLLI,
2139 			     ctx->drvdata->mlli_sram_addr,
2140 			     areq_ctx->mlli_nents, NS_BIT);
2141 		set_flow_mode(&desc[idx], flow_mode);
2142 		idx++;
2143 	}
2144 	if (is_not_last_data)
2145 		set_din_not_last_indication(&desc[(idx - 1)]);
2146 	/* return updated desc sequence size */
2147 	*seq_size = idx;
2148 }
2149 
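/*
 * Return the larval (initial) digest constant for the given hash mode.
 * An invalid mode logs an error and falls back to md5_init.
 */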
2150 static const void *cc_larval_digest(struct device *dev, u32 mode)
2151 {
2152 	switch (mode) {
2153 	case DRV_HASH_MD5:
2154 		return md5_init;
2155 	case DRV_HASH_SHA1:
2156 		return sha1_init;
2157 	case DRV_HASH_SHA224:
2158 		return sha224_init;
2159 	case DRV_HASH_SHA256:
2160 		return sha256_init;
2161 	case DRV_HASH_SHA384:
2162 		return sha384_init;
2163 	case DRV_HASH_SHA512:
2164 		return sha512_init;
2165 	default:
2166 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2167 		return md5_init;
2168 	}
2169 }
2170 
2171 /*!
2172  * Gets the address of the initial digest in SRAM
2173  * according to the given hash mode
2174  *
2175  * \param drvdata The driver's private data
2176  * \param mode The hash mode. Supported modes:
2177  *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
2178  * \return cc_sram_addr_t The address of the initial digest in SRAM
2179  */
2180 cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2181 {
2182 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2183 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2184 	struct device *dev = drvdata_to_dev(_drvdata);
2185 
2186 	switch (mode) {
2187 	case DRV_HASH_NULL:
2188 		break; /* Ignore */
2189 	case DRV_HASH_MD5:
2190 		return hash_handle->larval_digest_sram_addr;
2191 	case DRV_HASH_SHA1:
2192 		return hash_handle->larval_digest_sram_addr +
2193 			sizeof(md5_init);
2194 	case DRV_HASH_SHA224:
2195 		return hash_handle->larval_digest_sram_addr +
2196 			sizeof(md5_init) +
2197 			sizeof(sha1_init);
2198 	case DRV_HASH_SHA256:
2199 		return hash_handle->larval_digest_sram_addr +
2200 			sizeof(md5_init) +
2201 			sizeof(sha1_init) +
2202 			sizeof(sha224_init);
2203 	case DRV_HASH_SHA384:
2204 		return hash_handle->larval_digest_sram_addr +
2205 			sizeof(md5_init) +
2206 			sizeof(sha1_init) +
2207 			sizeof(sha224_init) +
2208 			sizeof(sha256_init);
2209 	case DRV_HASH_SHA512:
2210 		return hash_handle->larval_digest_sram_addr +
2211 			sizeof(md5_init) +
2212 			sizeof(sha1_init) +
2213 			sizeof(sha224_init) +
2214 			sizeof(sha256_init) +
2215 			sizeof(sha384_init);
2216 	default:
2217 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2218 	}
2219 
2220 	/* Fallback: a valid (though incorrect) address, to avoid a kernel crash */
2221 	return hash_handle->larval_digest_sram_addr;
2222 }
2223 
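/*!
 * Gets the address of the digest-length constant in SRAM
 * according to the given hash mode
 *
 * \param drvdata The driver's private data
 * \param mode The hash mode
 *
 * \return cc_sram_addr_t The address of the digest-length constant in SRAM
 */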
2224 cc_sram_addr_t
2225 cc_digest_len_addr(void *drvdata, u32 mode)
2226 {
2227 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2228 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2229 	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2230 
2231 	switch (mode) {
2232 	case DRV_HASH_SHA1:
2233 	case DRV_HASH_SHA224:
2234 	case DRV_HASH_SHA256:
2235 	case DRV_HASH_MD5:
2236 		return digest_len_addr;
2237 #if (CC_DEV_SHA_MAX > 256)
2238 	case DRV_HASH_SHA384:
2239 	case DRV_HASH_SHA512:
2240 		return  digest_len_addr + sizeof(digest_len_init);
2241 #endif
2242 	default:
2243 		return digest_len_addr; /* Fallback to avoid a kernel crash */
2244 	}
2245 }
2246