/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50

struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	spinlock_t		lock; /* lock to protect queue */

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}

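/*
 * Program HASH_STR.NBLW with the number of valid bits in the last 32-bit
 * word written to HASH_DIN (8 bits per trailing byte), so the hardware can
 * ignore the padding bytes of a partial final word.
 */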
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

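/*
 * Feed the HMAC key into HASH_DIN one 32-bit word at a time, then set DCAL
 * to start the key digest phase. Returns -EINPROGRESS while the hardware
 * consumes the key, or 0 if there is no key to write.
 */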
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

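/*
 * One-time HASH_CR setup for the current request: hash algorithm, data
 * swapping mode and, for HMAC, the long-key bit. Guarded by HASH_FLAGS_INIT
 * so it runs only once per request.
 */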
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

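/*
 * Copy as much pending request data as fits from the scatterlist into the
 * linear staging buffer, advancing rctx->sg, rctx->offset and rctx->total.
 */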
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (!count) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

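/*
 * Push @length bytes from @buf to HASH_DIN under CPU control, one 32-bit
 * word at a time. When @final is set, NBLW and DCAL are programmed so the
 * hardware computes the digest; completion is then signalled by interrupt.
 */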
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* For HMAC, the key must be loaded once before the first data words */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

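/*
 * Queue one scatterlist entry on the DMA channel and wait, with a timeout,
 * for its completion. @mdma selects MDMAT mode when further transfers will
 * follow for the same digest.
 */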
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

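/*
 * Send the HMAC key for a DMA-based request: short keys (or dma_mode 1)
 * are written by the CPU, longer ones are mapped and pushed through DMA.
 */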
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

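/*
 * Feed the whole request scatterlist to the hardware by DMA. In dma_mode 1
 * the trailing bytes of the last entry (below 16-byte alignment) are carved
 * off into the staging buffer and written by the CPU with DMAA set.
 */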
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the remainder up to the next 32-bit word */
			memset((u8 *)buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

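/*
 * DMA is only used for requests above HASH_DMA_THRESHOLD whose scatterlist
 * entries are 32-bit aligned; multi-entry lists are rejected outright in
 * dma_mode 1.
 */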
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* done task will not finish it, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

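/*
 * Export the hardware context (IMR, STR, CR plus the 53 context swap
 * registers) together with the software request state, following the
 * context-swap procedure of the STM32 HASH peripheral.
 */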
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	pm_runtime_get_sync(hdev->dev);

	/* Wait until the hardware is ready for a context swap */
	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

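/*
 * Restore a previously exported context: IMR and STR first, then HASH_CR
 * written twice (once as saved, then with HASH_CR_INIT set) before the
 * context swap registers are reloaded, mirroring the export above.
 */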
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen > HASH_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

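/*
 * Threaded half of the interrupt: once the digest is ready (CPU mode) or
 * the last DMA transfer has completed, finalize the current request.
 */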
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %u : %u failed\n", i, j);
	/* Unregister everything registered before the failure */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--)
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

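/*
 * Probe order: MMIO and IRQ resources, clock, runtime PM, optional reset,
 * optional DMA channel, crypto engine, then algorithm registration. The
 * error path unwinds in reverse.
 */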
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%lu)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_get_sync(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");