/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

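/* Register offsets */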
#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

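/*
 * Internal buffer length, key size limits and the request size threshold
 * (in bytes) below which data is fed to the IP by the CPU rather than by DMA.
 */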
#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

struct stm32_hash_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	spinlock_t		lock; /* lock to protect queue */

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* protects dev_list access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}

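/*
 * NBLW holds the number of valid bits in the last 32-bit word written to
 * HASH_DIN, i.e. 8 * (length % 4); a value of 0 means the whole word is
 * valid.
 */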
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

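/*
 * Feed the HMAC key to the IP word by word, then set DCAL to start the key
 * processing. Returns -EINPROGRESS while the hardware digests the key, or 0
 * when there is no key to send.
 */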
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

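/*
 * Program the control register once per request: select the algorithm and
 * data type, enable HMAC mode (with LKEY for keys longer than 64 bytes) when
 * needed, and unmask the digest-complete interrupt.
 */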
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

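/*
 * Copy request data from the scatterlist into the internal buffer until the
 * buffer is full or the request is exhausted.
 */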
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count == 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

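/*
 * Push a buffer to the IP with the CPU: write the HMAC key first if it has
 * not been sent yet, feed the data word by word, and on the final chunk
 * program NBLW and set DCAL to start the digest computation.
 */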
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    !(hdev->flags & HASH_FLAGS_HMAC_KEY)) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

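/*
 * Push one scatterlist entry to the IP through the DMA channel. The MDMAT
 * bit, when set, indicates that further DMA transfers will follow for the
 * same digest.
 */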
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

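/*
 * Send the HMAC key to the IP: short keys (or IPs reporting dma_mode == 1)
 * are written by the CPU, longer keys go through the DMA channel like
 * regular data.
 */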
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

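/*
 * Walk the request scatterlist and push each entry through DMA, handling
 * the non word-aligned tail of the last entry and, for HMAC, sending the
 * key both before and after the data.
 */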
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the copied tail up to a word boundary */
			memset((u8 *)buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

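/*
 * DMA is only used for requests larger than HASH_DMA_THRESHOLD whose
 * scatterlist entries are word-aligned; everything else takes the CPU path.
 */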
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);

	rctx->bufcnt = 0;

	return err;
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* done task will not finish it, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int ret;

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (rctx->bufcnt + rctx->total < rctx->buflen) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);

	if (rctx->flags & HASH_FLAGS_FINUP)
		return ret;

	return 0;
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() must always be called to clean up resources, even if
	 * update() failed, except when it returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

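/*
 * export()/import() save and restore the IP context (IMR, STR, CR plus the
 * CSR context-swap registers) together with the request context, so a hash
 * can be suspended and resumed across requests.
 */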
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
		cpu_relax();

	rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
				   GFP_KERNEL);
	if (!rctx->hw_context)
		return -ENOMEM;

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

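/*
 * The hard IRQ handler only acknowledges the digest-complete flag and wakes
 * the thread; the threaded handler finishes the request outside interrupt
 * context.
 */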
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	int err = 0;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, err);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

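/*
 * The ahash algorithms above are reached through the generic crypto API once
 * they are registered below. As a minimal sketch (not part of this driver,
 * error handling omitted), a kernel client could compute a digest with:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   NULL, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */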
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unregister the partial group, then all fully registered groups */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--)
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	const struct of_device_id *match;
	int err;

	match = of_match_device(stm32_hash_of_match, dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_property_read_u32(dev->of_node, "dma-maxburst",
				   &hdev->dma_maxburst);

	hdev->pdata = match->data;

	return err;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%ld)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
	hdev->engine->hash_one_request = stm32_hash_one_request;

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");