/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8
/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2
enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	spinlock_t		lock; /* lock to protect queue */

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}

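/*
 * Program the number of valid bits in the last word (NBLW) of the message:
 * the datapath always receives full 32-bit words, so the core needs to know
 * how many bits of the final word belong to the message, i.e.
 * 8 * (length % 4), or 0 when the data is word-aligned.
 */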
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

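/*
 * Feed the HMAC key into HASH_DIN word by word, then set DCAL to start the
 * key digest. Returns -EINPROGRESS while the hardware is processing the
 * key, or 0 when no key is set.
 */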
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

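/*
 * Program the control register for a new request: algorithm, data type and
 * HMAC mode (with LKEY for keys longer than 64 bytes), then set INIT to
 * start a fresh digest. Done once per request, guarded by HASH_FLAGS_INIT.
 */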
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

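/*
 * Copy data from the request scatterlist into the internal buffer until
 * the buffer is full or the request is exhausted, advancing
 * rctx->sg/offset/total as it goes.
 */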
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (!count) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

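/*
 * Push a buffer to the FIFO using CPU writes. For HMAC the key is written
 * once before the data (inner key) and again after the final block (outer
 * key). Returns -EINPROGRESS when a digest calculation has been started.
 */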
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    !(hdev->flags & HASH_FLAGS_HMAC_KEY)) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

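/*
 * CPU-mode update: drain full buffers through stm32_hash_xmit_cpu(), then
 * buffer the remainder; on finup, flush whatever is left as the final
 * block.
 */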
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

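/*
 * Queue one scatterlist entry on the DMA channel and wait for completion.
 * MDMAT is set when more data follows (mdma != 0) so the DMA interface
 * stays armed for a subsequent transfer instead of closing the digest.
 */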
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

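/*
 * Send the HMAC key to the hardware. Short keys (or dma_mode 1) go through
 * CPU writes via stm32_hash_write_key(); longer keys are mapped and sent
 * by DMA like regular data.
 */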
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

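/*
 * DMA-mode processing of the whole request. Each scatterlist entry is sent
 * separately; the last one may need a length fix-up since the DMA engine
 * only handles word-multiple transfers. In dma_mode 1 the unaligned tail
 * (ncp bytes) is copied out beforehand and fed by CPU writes afterwards.
 */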
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u8 *buffer = rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the tail up to a full word boundary */
			memset(buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

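/*
 * Decide whether the request can be handled by DMA: large enough, all but
 * the last scatterlist entry word-aligned in length, and a word-aligned
 * start offset.
 */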
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}

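/*
 * Read the message digest out of the HASH_HREG output registers and
 * byte-swap the big-endian register values into rctx->digest.
 */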
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

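/*
 * Requests are serialized through the crypto engine: handle_queue() hands
 * a request to the engine, prepare_req() (re)initializes the hardware and
 * one_request() then runs the update or final step.
 */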
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/*
	 * Unless the request completes asynchronously (-EINPROGRESS), the
	 * done task will not finish it, so do it here.
	 */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (rctx->bufcnt + rctx->total < rctx->buflen) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() must always be called to clean up resources even if
	 * update() failed, except when update() returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

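/*
 * export()/import() save and restore the hardware context (IMR, STR, CR
 * and the HASH_CSR_REGISTER_NUMBER context-swap registers) along with the
 * request context, so that a partial hash can be suspended and resumed.
 */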
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context)
		return -ENOMEM;

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unwind the partially registered group, then the earlier groups */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%ld)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");