// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

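/*
 * Algorithm selection: the ALGO field of HASH_CR is split across bit 7
 * (ALGO[0]) and bit 18 (ALGO[1]), hence the non-contiguous values below.
 */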
#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50

struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}

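/*
 * Program the number of valid bits in the last word pushed to the FIFO
 * (NBLW field of HASH_STR): 8 bits per byte beyond the last full 32-bit
 * word.
 */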
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

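/*
 * Feed the HMAC key into the data FIFO word by word and start the key
 * digest phase via DCAL. Returns -EINPROGRESS while the hardware is
 * processing the key, or 0 if no key is set.
 */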
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

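/*
 * Program HASH_CR for the current request: algorithm, data type and HMAC
 * mode (with LKEY for keys longer than 64 bytes), then enable the
 * digest-complete interrupt. Done only once per request, guarded by
 * HASH_FLAGS_INIT.
 */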
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

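/*
 * Copy request data from the scatterlist into the internal buffer until
 * either the buffer is full or the request data is exhausted.
 */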
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

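		/*
		 * count can be zero because the buffer is full or because
		 * the current sg entry is empty. A zero-length entry does
		 * not necessarily end the list, so only stop on the last
		 * one.
		 */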
		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

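/*
 * Push a buffer to the hardware FIFO with the CPU. For the final block,
 * program NBLW and trigger the digest calculation with DCAL; in HMAC mode
 * the key is additionally re-injected after the message.
 */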
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

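/*
 * Submit one scatterlist entry to the DMA engine and synchronously wait
 * (up to 100 ms) for its completion. MDMAT is set for intermediate
 * transfers so the hardware keeps the digest phase open.
 */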
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

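/*
 * Load the HMAC key around the message: short keys (or dma_mode 1) go
 * through the CPU FIFO path, longer keys are mapped and sent by DMA.
 */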
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

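/*
 * Send the whole request through DMA, one scatterlist entry at a time.
 * With dma_mode 1 (no intermediate DMA transfers) the last entry is
 * truncated to a 16-byte boundary and the remainder is fed through the
 * CPU FIFO before triggering DCAL.
 */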
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the tail up to a full 32-bit word */
			memset((u8 *)buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

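/*
 * DMA is only worth using for requests above HASH_DMA_THRESHOLD bytes
 * whose scatterlist entries are word-aligned; dma_mode 1 additionally
 * restricts DMA to single-entry lists.
 */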
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* done task will not finish it, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources, even if
	 * update() failed, except when the request is still queued or in
	 * progress (-EINPROGRESS or -EBUSY above).
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

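/*
 * The exported state is the request context, which carries a pointer to
 * a snapshot of HASH_IMR, HASH_STR, HASH_CR and the 53 context-swap
 * (CSR) registers, taken once the hardware is idle.
 */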
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	pm_runtime_get_sync(hdev->dev);

	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen > HASH_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

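/*
 * Hard IRQ handler: acknowledge the digest-complete interrupt, mask
 * further interrupts and defer completion to the threaded handler.
 */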
static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unwind: entries 0..j-1 of group i, then all earlier groups */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--)
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
			dev_err(dev, "failed to get clock for hash (%lu)\n",
				PTR_ERR(hdev->clk));
		}

		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
		dev_dbg(dev, "DMA mode not available\n");
		break;
	default:
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_get_sync(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");