// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */
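
/*
 * Illustrative usage sketch (not part of the driver): once this driver has
 * registered its algorithms, a kernel consumer reaches the hardware through
 * the generic ahash API. Assuming a caller-provided linear buffer "data" of
 * "len" bytes, a sha256 digest request could look like this (error handling
 * omitted; which provider serves "sha256" depends on cra_priority):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */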

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

#define HASH_CR_UX500_EMPTYMSG		BIT(20)
#define HASH_CR_UX500_ALGO_SHA1		BIT(7)
#define HASH_CR_UX500_ALGO_SHA256	0x0

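/*
 * The algorithm selection above is a two-bit field split across CR bit 7
 * and CR bit 18 on the STM32 variants: SHA1 = neither bit, MD5 = bit 7,
 * SHA224 = bit 18, SHA256 = bits 18 and 7. On the Ux500 only bit 7 is
 * used: set for SHA1, clear for SHA256.
 */
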
/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	54

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50

struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	struct crypto_shash	*xtfm;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_state {
	u32			flags;

	u16			bufcnt;
	u16			buflen;

	u8 buffer[HASH_BUFLEN] __aligned(4);

	/* hash state */
	u32			*hw_context;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	struct stm32_hash_state state;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
	bool				has_sr;
	bool				has_mdmat;
	bool				broken_emptymsg;
	bool				ux500;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;
	bool			polled;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	/* The Ux500 lacks the special status register, we poll the DCAL bit instead */
	if (!hdev->pdata->has_sr)
		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
						  !(status & HASH_STR_DCAL), 10, 10000);

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}

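/*
 * Program NBLW, the number of valid bits in the last word written to the
 * data input register, so the hardware ignores the padding bytes of a
 * partial final word (0 means all 32 bits are valid).
 */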
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

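/*
 * Program the control register for a new hash: algorithm selection, data
 * type (byte swapping), HMAC mode and long-key handling. This is done only
 * once per request, guarded by HASH_FLAGS_INIT.
 */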
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_state *state = &rctx->state;

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (state->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (state->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		/*
		 * On the Ux500 we need to set a special flag to indicate that
		 * the message is zero length.
		 */
		if (hdev->pdata->ux500 && bufcnt == 0)
			reg |= HASH_CR_UX500_EMPTYMSG;

		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

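/*
 * Copy request data from the scatterlist into the state buffer until either
 * the buffer is full or the request is exhausted, advancing the
 * sg/offset/total bookkeeping as it goes.
 */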
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;
	size_t count;

	while ((state->bufcnt < state->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min_t(size_t, count, state->buflen - state->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
					 rctx->sg, rctx->offset, count, 0);

		state->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

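/*
 * Feed one buffer to the hash core with CPU writes: program the control
 * register, optionally load the inner HMAC key, push the words into the
 * data input register and, for a final block, trigger the digest
 * calculation (and the outer HMAC key load).
 */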
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);

	final = state->flags & HASH_FLAGS_FINAL;

	while ((rctx->total >= state->buflen) ||
	       (state->bufcnt + rctx->total >= state->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
	}

	return err;
}

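/*
 * Push one scatterlist entry to the data input register through DMA and
 * synchronously wait for the transfer to finish. Returns -EINPROGRESS on
 * success, since digest completion is still signalled by IRQ or polling.
 */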
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	/* Only touch the MDMAT bit on hardware that actually has it */
	if (hdev->pdata->has_mdmat) {
		if (mdma)
			reg |= HASH_CR_MDMAT;
		else
			reg &= ~HASH_CR_MDMAT;
	}
	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

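/*
 * Load the inner HMAC key: short keys, or hardware reporting dma_mode == 1,
 * are written with CPU accesses; otherwise the key is mapped and streamed
 * through the DMA channel like ordinary message data.
 */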
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

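/*
 * Hash a whole request through DMA: program the control register, send the
 * inner HMAC key if needed, stream each scatterlist entry, then handle the
 * dma_mode == 1 tail (residue that is not a 16-byte multiple is pushed with
 * CPU writes) and finally the outer HMAC key.
 */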
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	u32 *buffer = (void *)rctx->state.buffer;
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev, rctx->total);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->state.buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the residue up to the next 32-bit word */
			memset((u8 *)buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

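/*
 * Decide whether a request can take the DMA path: it must be large enough
 * to be worthwhile, every scatterlist entry except possibly the last must
 * have a 32-bit-aligned length, and the first entry must start on a
 * word-aligned offset.
 */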
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;

	rctx->hdev = hdev;

	state->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->state.bufcnt = 0;
	rctx->state.buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	if (ctx->flags & HASH_FLAGS_HMAC)
		state->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;

	if (!(state->flags & HASH_FLAGS_CPU))
		return stm32_hash_dma_send(hdev);

	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	int buflen = state->bufcnt;

	if (state->flags & HASH_FLAGS_FINUP)
		return stm32_hash_update_req(hdev);

	state->bufcnt = 0;

	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
}

static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	if (hdev->pdata->broken_emptymsg && !req->nbytes)
		return stm32_hash_emptymsg_fallback(req);

	switch (state->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++) {
		if (hdev->pdata->ux500)
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_UX500_HREG(i)));
		else
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_HREG(i)));
	}
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		state->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
	}

	return 0;
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (err == -EINPROGRESS && hdev->polled) {
		if (stm32_hash_wait_busy(hdev)) {
			err = -ETIMEDOUT;
		} else {
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((state->bufcnt + rctx->total < state->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	state->flags |= HASH_FLAGS_FINAL;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes)
		goto out;

	state->flags |= HASH_FLAGS_FINUP;
	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		state->flags &= ~HASH_FLAGS_CPU;

out:
	return stm32_hash_final(req);
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

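/*
 * Export saves the request software state plus a snapshot of the hardware
 * context (IMR, STR, CR and the context swap registers) so that another
 * request can be interleaved on the core.
 */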
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	u32 *preg;
	unsigned int i;
	int ret;

	pm_runtime_get_sync(hdev->dev);

	ret = stm32_hash_wait_busy(hdev);
	if (ret)
		goto pm_put;

	state->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					  sizeof(u32), GFP_KERNEL);
	if (!state->hw_context) {
		ret = -ENOMEM;
		goto pm_put;
	}
	preg = state->hw_context;

	if (!hdev->pdata->ux500)
		*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	memcpy(out, rctx, sizeof(*rctx));

pm_put:
	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	return ret;
}

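/*
 * Import restores the software state and replays the saved hardware context
 * into the core, re-asserting HASH_CR_INIT together with the original
 * control register value, then frees the snapshot taken by export.
 */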
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = state->hw_context;

	pm_runtime_get_sync(hdev->dev);

	if (!hdev->pdata->ux500)
		stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(state->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_shash *xtfm;

	/* The fallback is only needed on Ux500 */
	if (!hdev->pdata->ux500)
		return 0;

	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm)) {
		dev_err(hdev->dev, "failed to allocate %s fallback\n",
			name);
		return PTR_ERR(xtfm);
	}
	dev_info(hdev->dev, "allocated %s fallback\n", name);
	ctx->xtfm = xtfm;

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;

	return stm32_hash_init_fallback(tfm);
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->xtfm)
		crypto_free_shash(ctx->xtfm);
}

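/*
 * Threaded IRQ bottom half: once the digest is ready (CPU mode) or the DMA
 * transfer has completed, finish the current request. The hard IRQ handler
 * below acknowledges the output-ready flag and masks further interrupts.
 */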
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unwind the entries of the failing list registered so far ... */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	/* ... then all fully registered earlier lists */
	while (i--)
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_resume_and_get(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");