xref: /openbmc/linux/drivers/crypto/atmel-sha.c (revision aebe5bd7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cryptographic API.
4  *
5  * Support for ATMEL SHA1/SHA256 HW acceleration.
6  *
7  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
8  * Author: Nicolas Royer <nicolas@eukrea.com>
9  *
10  * Some ideas are from omap-sham.c drivers.
11  */
12 
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/clk.h>
19 #include <linux/io.h>
20 #include <linux/hw_random.h>
21 #include <linux/platform_device.h>
22 
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/scatterlist.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/of_device.h>
31 #include <linux/delay.h>
32 #include <linux/crypto.h>
33 #include <linux/cryptohash.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h>
36 #include <crypto/sha.h>
37 #include <crypto/hash.h>
38 #include <crypto/internal/hash.h>
39 #include <linux/platform_data/crypto-atmel.h>
40 #include "atmel-sha-regs.h"
41 #include "atmel-authenc.h"
42 
43 #define ATMEL_SHA_PRIORITY	300
44 
45 /* SHA flags */
46 #define SHA_FLAGS_BUSY			BIT(0)
47 #define SHA_FLAGS_FINAL			BIT(1)
48 #define SHA_FLAGS_DMA_ACTIVE		BIT(2)
49 #define SHA_FLAGS_OUTPUT_READY		BIT(3)
50 #define SHA_FLAGS_INIT			BIT(4)
51 #define SHA_FLAGS_CPU			BIT(5)
52 #define SHA_FLAGS_DMA_READY		BIT(6)
53 #define SHA_FLAGS_DUMP_REG		BIT(7)
54 
55 /* bits[11:8] are reserved. */
56 
57 #define SHA_FLAGS_FINUP		BIT(16)
58 #define SHA_FLAGS_SG		BIT(17)
59 #define SHA_FLAGS_ERROR		BIT(23)
60 #define SHA_FLAGS_PAD		BIT(24)
61 #define SHA_FLAGS_RESTORE	BIT(25)
62 #define SHA_FLAGS_IDATAR0	BIT(26)
63 #define SHA_FLAGS_WAIT_DATARDY	BIT(27)
64 
65 #define SHA_OP_INIT	0
66 #define SHA_OP_UPDATE	1
67 #define SHA_OP_FINAL	2
68 #define SHA_OP_DIGEST	3
69 
70 #define SHA_BUFFER_LEN		(PAGE_SIZE / 16)
71 
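/*
 * Requests below this size are fed to the Input Data Registers by the
 * CPU: the DMA set-up cost only pays off above this threshold (see the
 * "faster to use CPU for short transfers" checks below).
 */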
72 #define ATMEL_SHA_DMA_THRESHOLD		56
73 
74 struct atmel_sha_caps {
75 	bool	has_dma;
76 	bool	has_dualbuff;
77 	bool	has_sha224;
78 	bool	has_sha_384_512;
79 	bool	has_uihv;
80 	bool	has_hmac;
81 };
82 
83 struct atmel_sha_dev;
84 
85 /*
86  * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
87  * tested by the ahash_prepare_alg() function.
88  */
89 struct atmel_sha_reqctx {
90 	struct atmel_sha_dev	*dd;
91 	unsigned long	flags;
92 	unsigned long	op;
93 
94 	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
95 	u64	digcnt[2];
96 	size_t	bufcnt;
97 	size_t	buflen;
98 	dma_addr_t	dma_addr;
99 
100 	/* walk state */
101 	struct scatterlist	*sg;
102 	unsigned int	offset;	/* offset in current sg */
103 	unsigned int	total;	/* total request */
104 
105 	size_t block_size;
106 	size_t hash_size;
107 
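	/*
	 * Data buffer, with head-room for the final padding appended by
	 * atmel_sha_fill_padding() behind the buffered data.
	 */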
108 	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
109 };
110 
111 typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
112 
113 struct atmel_sha_ctx {
114 	struct atmel_sha_dev	*dd;
115 	atmel_sha_fn_t		start;
116 
117 	unsigned long		flags;
118 };
119 
120 #define ATMEL_SHA_QUEUE_LENGTH	50
121 
122 struct atmel_sha_dma {
123 	struct dma_chan			*chan;
124 	struct dma_slave_config dma_conf;
125 	struct scatterlist	*sg;
126 	int			nents;
127 	unsigned int		last_sg_length;
128 };
129 
130 struct atmel_sha_dev {
131 	struct list_head	list;
132 	unsigned long		phys_base;
133 	struct device		*dev;
134 	struct clk		*iclk;
135 	int			irq;
136 	void __iomem		*io_base;
137 
138 	spinlock_t		lock;
139 	struct tasklet_struct	done_task;
140 	struct tasklet_struct	queue_task;
141 
142 	unsigned long		flags;
143 	struct crypto_queue	queue;
144 	struct ahash_request	*req;
145 	bool			is_async;
146 	bool			force_complete;
147 	atmel_sha_fn_t		resume;
148 	atmel_sha_fn_t		cpu_transfer_complete;
149 
150 	struct atmel_sha_dma	dma_lch_in;
151 
152 	struct atmel_sha_caps	caps;
153 
154 	struct scatterlist	tmp;
155 
156 	u32	hw_version;
157 };
158 
159 struct atmel_sha_drv {
160 	struct list_head	dev_list;
161 	spinlock_t		lock;
162 };
163 
164 static struct atmel_sha_drv atmel_sha = {
165 	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
166 	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
167 };
168 
169 #ifdef VERBOSE_DEBUG
170 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
171 {
172 	switch (offset) {
173 	case SHA_CR:
174 		return "CR";
175 
176 	case SHA_MR:
177 		return "MR";
178 
179 	case SHA_IER:
180 		return "IER";
181 
182 	case SHA_IDR:
183 		return "IDR";
184 
185 	case SHA_IMR:
186 		return "IMR";
187 
188 	case SHA_ISR:
189 		return "ISR";
190 
191 	case SHA_MSR:
192 		return "MSR";
193 
194 	case SHA_BCR:
195 		return "BCR";
196 
197 	case SHA_REG_DIN(0):
198 	case SHA_REG_DIN(1):
199 	case SHA_REG_DIN(2):
200 	case SHA_REG_DIN(3):
201 	case SHA_REG_DIN(4):
202 	case SHA_REG_DIN(5):
203 	case SHA_REG_DIN(6):
204 	case SHA_REG_DIN(7):
205 	case SHA_REG_DIN(8):
206 	case SHA_REG_DIN(9):
207 	case SHA_REG_DIN(10):
208 	case SHA_REG_DIN(11):
209 	case SHA_REG_DIN(12):
210 	case SHA_REG_DIN(13):
211 	case SHA_REG_DIN(14):
212 	case SHA_REG_DIN(15):
213 		snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
214 		break;
215 
216 	case SHA_REG_DIGEST(0):
217 	case SHA_REG_DIGEST(1):
218 	case SHA_REG_DIGEST(2):
219 	case SHA_REG_DIGEST(3):
220 	case SHA_REG_DIGEST(4):
221 	case SHA_REG_DIGEST(5):
222 	case SHA_REG_DIGEST(6):
223 	case SHA_REG_DIGEST(7):
224 	case SHA_REG_DIGEST(8):
225 	case SHA_REG_DIGEST(9):
226 	case SHA_REG_DIGEST(10):
227 	case SHA_REG_DIGEST(11):
228 	case SHA_REG_DIGEST(12):
229 	case SHA_REG_DIGEST(13):
230 	case SHA_REG_DIGEST(14):
231 	case SHA_REG_DIGEST(15):
232 		if (wr)
233 			snprintf(tmp, sz, "IDATAR[%u]",
234 				 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
235 		else
236 			snprintf(tmp, sz, "ODATAR[%u]",
237 				 (offset - SHA_REG_DIGEST(0)) >> 2);
238 		break;
239 
240 	case SHA_HW_VERSION:
241 		return "HWVER";
242 
243 	default:
244 		snprintf(tmp, sz, "0x%02x", offset);
245 		break;
246 	}
247 
248 	return tmp;
249 }
250 
251 #endif /* VERBOSE_DEBUG */
252 
253 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
254 {
255 	u32 value = readl_relaxed(dd->io_base + offset);
256 
257 #ifdef VERBOSE_DEBUG
258 	if (dd->flags & SHA_FLAGS_DUMP_REG) {
259 		char tmp[16];
260 
261 		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
262 			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
263 	}
264 #endif /* VERBOSE_DEBUG */
265 
266 	return value;
267 }
268 
269 static inline void atmel_sha_write(struct atmel_sha_dev *dd,
270 					u32 offset, u32 value)
271 {
272 #ifdef VERBOSE_DEBUG
273 	if (dd->flags & SHA_FLAGS_DUMP_REG) {
274 		char tmp[16];
275 
276 		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
277 			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
278 	}
279 #endif /* VERBOSE_DEBUG */
280 
281 	writel_relaxed(value, dd->io_base + offset);
282 }
283 
284 static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
285 {
286 	struct ahash_request *req = dd->req;
287 
288 	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
289 		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
290 		       SHA_FLAGS_DUMP_REG);
291 
292 	clk_disable(dd->iclk);
293 
294 	if ((dd->is_async || dd->force_complete) && req->base.complete)
295 		req->base.complete(&req->base, err);
296 
297 	/* handle new request */
298 	tasklet_schedule(&dd->queue_task);
299 
300 	return err;
301 }
302 
303 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
304 {
305 	size_t count;
306 
307 	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
308 		count = min(ctx->sg->length - ctx->offset, ctx->total);
309 		count = min(count, ctx->buflen - ctx->bufcnt);
310 
311 		if (count <= 0) {
312 			/*
313 			 * count may be zero because the buffer is full or
314 			 * because the sg length is 0. In the latter case,
315 			 * check for another sg in the list: a zero-length
316 			 * sg does not necessarily end the sg list.
317 			 */
318 			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
319 				ctx->sg = sg_next(ctx->sg);
320 				continue;
321 			} else {
322 				break;
323 			}
324 		}
325 
326 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
327 			ctx->offset, count, 0);
328 
329 		ctx->bufcnt += count;
330 		ctx->offset += count;
331 		ctx->total -= count;
332 
333 		if (ctx->offset == ctx->sg->length) {
334 			ctx->sg = sg_next(ctx->sg);
335 			if (ctx->sg)
336 				ctx->offset = 0;
337 			else
338 				ctx->total = 0;
339 		}
340 	}
341 
342 	return 0;
343 }
344 
345 /*
346  * The purpose of this padding is to ensure that the padded message is a
347  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
348  * The bit "1" is appended at the end of the message followed by
349  * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
350  * 128-bit block (SHA384/SHA512) equal to the message length in bits
351  * is appended.
352  *
353  * For SHA1/SHA224/SHA256, padlen is calculated as follows:
354  *  - if message length < 56 bytes then padlen = 56 - message length
355  *  - else padlen = 64 + 56 - message length
356  *
357  * For SHA384/SHA512, padlen is calculated as follows:
358  *  - if message length < 112 bytes then padlen = 112 - message length
359  *  - else padlen = 128 + 112 - message length
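 *
 * For example, hashing a 3-byte message with SHA-256 gives index = 3 and
 * padlen = 53, so the padded message is 3 + 53 + 8 = 64 bytes: exactly
 * one block.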
360  */
361 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
362 {
363 	unsigned int index, padlen;
364 	__be64 bits[2];
365 	u64 size[2];
366 
367 	size[0] = ctx->digcnt[0];
368 	size[1] = ctx->digcnt[1];
369 
370 	size[0] += ctx->bufcnt;
371 	if (size[0] < ctx->bufcnt)
372 		size[1]++;
373 
374 	size[0] += length;
375 	if (size[0] < length)
376 		size[1]++;
377 
378 	bits[1] = cpu_to_be64(size[0] << 3);
379 	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
380 
381 	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
382 	case SHA_FLAGS_SHA384:
383 	case SHA_FLAGS_SHA512:
384 		index = ctx->bufcnt & 0x7f;
385 		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
386 		*(ctx->buffer + ctx->bufcnt) = 0x80;
387 		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
388 		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
389 		ctx->bufcnt += padlen + 16;
390 		ctx->flags |= SHA_FLAGS_PAD;
391 		break;
392 
393 	default:
394 		index = ctx->bufcnt & 0x3f;
395 		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
396 		*(ctx->buffer + ctx->bufcnt) = 0x80;
397 		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
398 		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
399 		ctx->bufcnt += padlen + 8;
400 		ctx->flags |= SHA_FLAGS_PAD;
401 		break;
402 	}
403 }
404 
405 static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
406 {
407 	struct atmel_sha_dev *dd = NULL;
408 	struct atmel_sha_dev *tmp;
409 
410 	spin_lock_bh(&atmel_sha.lock);
411 	if (!tctx->dd) {
412 		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
413 			dd = tmp;
414 			break;
415 		}
416 		tctx->dd = dd;
417 	} else {
418 		dd = tctx->dd;
419 	}
420 
421 	spin_unlock_bh(&atmel_sha.lock);
422 
423 	return dd;
424 }
425 
426 static int atmel_sha_init(struct ahash_request *req)
427 {
428 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
429 	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
430 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
431 	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
432 
433 	ctx->dd = dd;
434 
435 	ctx->flags = 0;
436 
437 	dev_dbg(dd->dev, "init: digest size: %d\n",
438 		crypto_ahash_digestsize(tfm));
439 
440 	switch (crypto_ahash_digestsize(tfm)) {
441 	case SHA1_DIGEST_SIZE:
442 		ctx->flags |= SHA_FLAGS_SHA1;
443 		ctx->block_size = SHA1_BLOCK_SIZE;
444 		break;
445 	case SHA224_DIGEST_SIZE:
446 		ctx->flags |= SHA_FLAGS_SHA224;
447 		ctx->block_size = SHA224_BLOCK_SIZE;
448 		break;
449 	case SHA256_DIGEST_SIZE:
450 		ctx->flags |= SHA_FLAGS_SHA256;
451 		ctx->block_size = SHA256_BLOCK_SIZE;
452 		break;
453 	case SHA384_DIGEST_SIZE:
454 		ctx->flags |= SHA_FLAGS_SHA384;
455 		ctx->block_size = SHA384_BLOCK_SIZE;
456 		break;
457 	case SHA512_DIGEST_SIZE:
458 		ctx->flags |= SHA_FLAGS_SHA512;
459 		ctx->block_size = SHA512_BLOCK_SIZE;
460 		break;
461 	default:
462 		return -EINVAL;
464 	}
465 
466 	ctx->bufcnt = 0;
467 	ctx->digcnt[0] = 0;
468 	ctx->digcnt[1] = 0;
469 	ctx->buflen = SHA_BUFFER_LEN;
470 
471 	return 0;
472 }
473 
474 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
475 {
476 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
477 	u32 valmr = SHA_MR_MODE_AUTO;
478 	unsigned int i, hashsize = 0;
479 
480 	if (likely(dma)) {
481 		if (!dd->caps.has_dma)
482 			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
483 		valmr = SHA_MR_MODE_PDC;
484 		if (dd->caps.has_dualbuff)
485 			valmr |= SHA_MR_DUALBUFF;
486 	} else {
487 		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
488 	}
489 
490 	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
491 	case SHA_FLAGS_SHA1:
492 		valmr |= SHA_MR_ALGO_SHA1;
493 		hashsize = SHA1_DIGEST_SIZE;
494 		break;
495 
496 	case SHA_FLAGS_SHA224:
497 		valmr |= SHA_MR_ALGO_SHA224;
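		/* SHA-224 shares the 256-bit SHA-256 internal state. */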
498 		hashsize = SHA256_DIGEST_SIZE;
499 		break;
500 
501 	case SHA_FLAGS_SHA256:
502 		valmr |= SHA_MR_ALGO_SHA256;
503 		hashsize = SHA256_DIGEST_SIZE;
504 		break;
505 
506 	case SHA_FLAGS_SHA384:
507 		valmr |= SHA_MR_ALGO_SHA384;
508 		hashsize = SHA512_DIGEST_SIZE;
509 		break;
510 
511 	case SHA_FLAGS_SHA512:
512 		valmr |= SHA_MR_ALGO_SHA512;
513 		hashsize = SHA512_DIGEST_SIZE;
514 		break;
515 
516 	default:
517 		break;
518 	}
519 
520 	/* Setting CR_FIRST only for the first iteration */
521 	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
522 		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
523 	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
524 		const u32 *hash = (const u32 *)ctx->digest;
525 
526 		/*
527 		 * Restore the hardware context: update the User Initialize
528 		 * Hash Value (UIHV) with the value saved when the latest
529 		 * 'update' operation completed on this very same crypto
530 		 * request.
531 		 */
532 		ctx->flags &= ~SHA_FLAGS_RESTORE;
533 		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
534 		for (i = 0; i < hashsize / sizeof(u32); ++i)
535 			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
536 		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
537 		valmr |= SHA_MR_UIHV;
538 	}
539 	/*
540 	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
541 	 * process concurrent requests: the internal registers used to store
542 	 * the hash/digest are still set to the partial digest output values
543 	 * computed during the latest round.
544 	 */
545 
546 	atmel_sha_write(dd, SHA_MR, valmr);
547 }
548 
549 static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
550 						atmel_sha_fn_t resume)
551 {
552 	u32 isr = atmel_sha_read(dd, SHA_ISR);
553 
554 	if (unlikely(isr & SHA_INT_DATARDY))
555 		return resume(dd);
556 
557 	dd->resume = resume;
558 	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
559 	return -EINPROGRESS;
560 }
561 
562 static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
563 			      size_t length, int final)
564 {
565 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
566 	int count, len32;
567 	const u32 *buffer = (const u32 *)buf;
568 
569 	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
570 		ctx->digcnt[1], ctx->digcnt[0], length, final);
571 
572 	atmel_sha_write_ctrl(dd, 0);
573 
574 	/* should be non-zero before next lines to disable clocks later */
575 	ctx->digcnt[0] += length;
576 	if (ctx->digcnt[0] < length)
577 		ctx->digcnt[1]++;
578 
579 	if (final)
580 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
581 
582 	len32 = DIV_ROUND_UP(length, sizeof(u32));
583 
584 	dd->flags |= SHA_FLAGS_CPU;
585 
586 	for (count = 0; count < len32; count++)
587 		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
588 
589 	return -EINPROGRESS;
590 }
591 
592 static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
593 		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
594 {
595 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
596 	int len32;
597 
598 	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
599 		ctx->digcnt[1], ctx->digcnt[0], length1, final);
600 
601 	len32 = DIV_ROUND_UP(length1, sizeof(u32));
602 	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
603 	atmel_sha_write(dd, SHA_TPR, dma_addr1);
604 	atmel_sha_write(dd, SHA_TCR, len32);
605 
606 	len32 = DIV_ROUND_UP(length2, sizeof(u32));
607 	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
608 	atmel_sha_write(dd, SHA_TNCR, len32);
609 
610 	atmel_sha_write_ctrl(dd, 1);
611 
612 	/* should be non-zero before next lines to disable clocks later */
613 	ctx->digcnt[0] += length1;
614 	if (ctx->digcnt[0] < length1)
615 		ctx->digcnt[1]++;
616 
617 	if (final)
618 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
619 
620 	dd->flags |= SHA_FLAGS_DMA_ACTIVE;
621 
622 	/* Start DMA transfer */
623 	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
624 
625 	return -EINPROGRESS;
626 }
627 
628 static void atmel_sha_dma_callback(void *data)
629 {
630 	struct atmel_sha_dev *dd = data;
631 
632 	dd->is_async = true;
633 
634 	/* dma_lch_in - completed - wait DATRDY */
635 	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
636 }
637 
638 static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
639 		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
640 {
641 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
642 	struct dma_async_tx_descriptor	*in_desc;
643 	struct scatterlist sg[2];
644 
645 	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
646 		ctx->digcnt[1], ctx->digcnt[0], length1, final);
647 
648 	dd->dma_lch_in.dma_conf.src_maxburst = 16;
649 	dd->dma_lch_in.dma_conf.dst_maxburst = 16;
650 
651 	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
652 
653 	if (length2) {
654 		sg_init_table(sg, 2);
655 		sg_dma_address(&sg[0]) = dma_addr1;
656 		sg_dma_len(&sg[0]) = length1;
657 		sg_dma_address(&sg[1]) = dma_addr2;
658 		sg_dma_len(&sg[1]) = length2;
659 		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
660 			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
661 	} else {
662 		sg_init_table(sg, 1);
663 		sg_dma_address(&sg[0]) = dma_addr1;
664 		sg_dma_len(&sg[0]) = length1;
665 		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
666 			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
667 	}
668 	if (!in_desc)
669 		return atmel_sha_complete(dd, -EINVAL);
670 
671 	in_desc->callback = atmel_sha_dma_callback;
672 	in_desc->callback_param = dd;
673 
674 	atmel_sha_write_ctrl(dd, 1);
675 
676 	/* should be non-zero before next lines to disable clocks later */
677 	ctx->digcnt[0] += length1;
678 	if (ctx->digcnt[0] < length1)
679 		ctx->digcnt[1]++;
680 
681 	if (final)
682 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
683 
684 	dd->flags |= SHA_FLAGS_DMA_ACTIVE;
685 
686 	/* Start DMA transfer */
687 	dmaengine_submit(in_desc);
688 	dma_async_issue_pending(dd->dma_lch_in.chan);
689 
690 	return -EINPROGRESS;
691 }
692 
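/*
 * Start the next transfer either through the dmaengine slave channel or
 * through the SHA block's built-in Peripheral DMA Controller (PDC),
 * depending on dd->caps.has_dma.
 */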
693 static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
694 		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
695 {
696 	if (dd->caps.has_dma)
697 		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
698 				dma_addr2, length2, final);
699 	else
700 		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
701 				dma_addr2, length2, final);
702 }
703 
704 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
705 {
706 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
707 	int bufcnt;
708 
709 	atmel_sha_append_sg(ctx);
710 	atmel_sha_fill_padding(ctx, 0);
711 	bufcnt = ctx->bufcnt;
712 	ctx->bufcnt = 0;
713 
714 	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
715 }
716 
717 static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
718 					struct atmel_sha_reqctx *ctx,
719 					size_t length, int final)
720 {
721 	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
722 				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
723 	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
724 		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
725 				ctx->block_size);
726 		return atmel_sha_complete(dd, -EINVAL);
727 	}
728 
729 	ctx->flags &= ~SHA_FLAGS_SG;
730 
731 	/* next call does not fail... so no unmap in the case of error */
732 	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
733 }
734 
735 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
736 {
737 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
738 	unsigned int final;
739 	size_t count;
740 
741 	atmel_sha_append_sg(ctx);
742 
743 	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
744 
745 	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
746 		 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
747 
748 	if (final)
749 		atmel_sha_fill_padding(ctx, 0);
750 
751 	if (final || (ctx->bufcnt == ctx->buflen)) {
752 		count = ctx->bufcnt;
753 		ctx->bufcnt = 0;
754 		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
755 	}
756 
757 	return 0;
758 }
759 
760 static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
761 {
762 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
763 	unsigned int length, final, tail;
764 	struct scatterlist *sg;
765 	unsigned int count;
766 
767 	if (!ctx->total)
768 		return 0;
769 
770 	if (ctx->bufcnt || ctx->offset)
771 		return atmel_sha_update_dma_slow(dd);
772 
773 	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
774 		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
775 
776 	sg = ctx->sg;
777 
778 	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
779 		return atmel_sha_update_dma_slow(dd);
780 
781 	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
782 		/* size is not ctx->block_size aligned */
783 		return atmel_sha_update_dma_slow(dd);
784 
785 	length = min(ctx->total, sg->length);
786 
787 	if (sg_is_last(sg)) {
788 		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
789 			/* not last sg must be ctx->block_size aligned */
790 			tail = length & (ctx->block_size - 1);
791 			length -= tail;
792 		}
793 	}
794 
795 	ctx->total -= length;
796 	ctx->offset = length; /* offset where to start slow */
797 
798 	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
799 
800 	/* Add padding */
801 	if (final) {
802 		tail = length & (ctx->block_size - 1);
803 		length -= tail;
804 		ctx->total += tail;
805 		ctx->offset = length; /* offset where to start slow */
806 
807 		sg = ctx->sg;
808 		atmel_sha_append_sg(ctx);
809 
810 		atmel_sha_fill_padding(ctx, length);
811 
812 		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
813 			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
814 		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
815 			dev_err(dd->dev, "dma %zu bytes error\n",
816 				ctx->buflen + ctx->block_size);
817 			return atmel_sha_complete(dd, -EINVAL);
818 		}
819 
820 		if (length == 0) {
821 			ctx->flags &= ~SHA_FLAGS_SG;
822 			count = ctx->bufcnt;
823 			ctx->bufcnt = 0;
824 			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
825 					0, final);
826 		} else {
827 			ctx->sg = sg;
828 			if (!dma_map_sg(dd->dev, ctx->sg, 1,
829 					DMA_TO_DEVICE)) {
830 				dev_err(dd->dev, "dma_map_sg error\n");
831 				return atmel_sha_complete(dd, -EINVAL);
832 			}
833 
834 			ctx->flags |= SHA_FLAGS_SG;
835 
836 			count = ctx->bufcnt;
837 			ctx->bufcnt = 0;
838 			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
839 					length, ctx->dma_addr, count, final);
840 		}
841 	}
842 
843 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
844 		dev_err(dd->dev, "dma_map_sg error\n");
845 		return atmel_sha_complete(dd, -EINVAL);
846 	}
847 
848 	ctx->flags |= SHA_FLAGS_SG;
849 
850 	/* next call does not fail... so no unmap in the case of error */
851 	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
852 								0, final);
853 }
854 
855 static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
856 {
857 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
858 
859 	if (ctx->flags & SHA_FLAGS_SG) {
860 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
861 		if (ctx->sg->length == ctx->offset) {
862 			ctx->sg = sg_next(ctx->sg);
863 			if (ctx->sg)
864 				ctx->offset = 0;
865 		}
866 		if (ctx->flags & SHA_FLAGS_PAD) {
867 			dma_unmap_single(dd->dev, ctx->dma_addr,
868 				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
869 		}
870 	} else {
871 		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
872 						ctx->block_size, DMA_TO_DEVICE);
873 	}
874 }
875 
876 static int atmel_sha_update_req(struct atmel_sha_dev *dd)
877 {
878 	struct ahash_request *req = dd->req;
879 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
880 	int err;
881 
882 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
883 		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
884 
885 	if (ctx->flags & SHA_FLAGS_CPU)
886 		err = atmel_sha_update_cpu(dd);
887 	else
888 		err = atmel_sha_update_dma_start(dd);
889 
890 	/* wait for DMA completion before we can take more data */
891 	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
892 			err, ctx->digcnt[1], ctx->digcnt[0]);
893 
894 	return err;
895 }
896 
897 static int atmel_sha_final_req(struct atmel_sha_dev *dd)
898 {
899 	struct ahash_request *req = dd->req;
900 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
901 	int err = 0;
902 	int count;
903 
904 	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
905 		atmel_sha_fill_padding(ctx, 0);
906 		count = ctx->bufcnt;
907 		ctx->bufcnt = 0;
908 		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
909 	} else {
910 		/* faster to handle the last block with the CPU */
912 		atmel_sha_fill_padding(ctx, 0);
913 		count = ctx->bufcnt;
914 		ctx->bufcnt = 0;
915 		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
916 	}
917 
918 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
919 
920 	return err;
921 }
922 
923 static void atmel_sha_copy_hash(struct ahash_request *req)
924 {
925 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
926 	u32 *hash = (u32 *)ctx->digest;
927 	unsigned int i, hashsize;
928 
929 	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
930 	case SHA_FLAGS_SHA1:
931 		hashsize = SHA1_DIGEST_SIZE;
932 		break;
933 
934 	case SHA_FLAGS_SHA224:
935 	case SHA_FLAGS_SHA256:
936 		hashsize = SHA256_DIGEST_SIZE;
937 		break;
938 
939 	case SHA_FLAGS_SHA384:
940 	case SHA_FLAGS_SHA512:
941 		hashsize = SHA512_DIGEST_SIZE;
942 		break;
943 
944 	default:
945 		/* Should not happen... */
946 		return;
947 	}
948 
949 	for (i = 0; i < hashsize / sizeof(u32); ++i)
950 		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
951 	ctx->flags |= SHA_FLAGS_RESTORE;
952 }
953 
954 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
955 {
956 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
957 
958 	if (!req->result)
959 		return;
960 
961 	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
962 	default:
963 	case SHA_FLAGS_SHA1:
964 		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
965 		break;
966 
967 	case SHA_FLAGS_SHA224:
968 		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
969 		break;
970 
971 	case SHA_FLAGS_SHA256:
972 		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
973 		break;
974 
975 	case SHA_FLAGS_SHA384:
976 		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
977 		break;
978 
979 	case SHA_FLAGS_SHA512:
980 		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
981 		break;
982 	}
983 }
984 
985 static int atmel_sha_finish(struct ahash_request *req)
986 {
987 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
988 	struct atmel_sha_dev *dd = ctx->dd;
989 
990 	if (ctx->digcnt[0] || ctx->digcnt[1])
991 		atmel_sha_copy_ready_hash(req);
992 
993 	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
994 		ctx->digcnt[0], ctx->bufcnt);
995 
996 	return 0;
997 }
998 
999 static void atmel_sha_finish_req(struct ahash_request *req, int err)
1000 {
1001 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1002 	struct atmel_sha_dev *dd = ctx->dd;
1003 
1004 	if (!err) {
1005 		atmel_sha_copy_hash(req);
1006 		if (SHA_FLAGS_FINAL & dd->flags)
1007 			err = atmel_sha_finish(req);
1008 	} else {
1009 		ctx->flags |= SHA_FLAGS_ERROR;
1010 	}
1011 
1012 	/* atomic operation is not needed here */
1013 	(void)atmel_sha_complete(dd, err);
1014 }
1015 
1016 static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
1017 {
1018 	int err;
1019 
1020 	err = clk_enable(dd->iclk);
1021 	if (err)
1022 		return err;
1023 
1024 	if (!(SHA_FLAGS_INIT & dd->flags)) {
1025 		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
1026 		dd->flags |= SHA_FLAGS_INIT;
1027 	}
1028 
1029 	return 0;
1030 }
1031 
1032 static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
1033 {
1034 	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
1035 }
1036 
1037 static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
1038 {
1039 	int err;
1040 
1041 	err = atmel_sha_hw_init(dd);
1042 	if (err)
1043 		return err;
1044 
1045 	dd->hw_version = atmel_sha_get_version(dd);
1046 
1047 	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
1049 
1050 	clk_disable(dd->iclk);
1051 
1052 	return 0;
1053 }
1054 
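/*
 * Enqueue the request (if any) and, when the engine is idle, dequeue and
 * start the next one.  Returns the enqueue status (-EINPROGRESS or -EBUSY)
 * for an asynchronously started request, or the ctx->start() result when
 * the dequeued request is the one just passed in.
 */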
1055 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
1056 				  struct ahash_request *req)
1057 {
1058 	struct crypto_async_request *async_req, *backlog;
1059 	struct atmel_sha_ctx *ctx;
1060 	unsigned long flags;
1061 	bool start_async;
1062 	int err = 0, ret = 0;
1063 
1064 	spin_lock_irqsave(&dd->lock, flags);
1065 	if (req)
1066 		ret = ahash_enqueue_request(&dd->queue, req);
1067 
1068 	if (SHA_FLAGS_BUSY & dd->flags) {
1069 		spin_unlock_irqrestore(&dd->lock, flags);
1070 		return ret;
1071 	}
1072 
1073 	backlog = crypto_get_backlog(&dd->queue);
1074 	async_req = crypto_dequeue_request(&dd->queue);
1075 	if (async_req)
1076 		dd->flags |= SHA_FLAGS_BUSY;
1077 
1078 	spin_unlock_irqrestore(&dd->lock, flags);
1079 
1080 	if (!async_req)
1081 		return ret;
1082 
1083 	if (backlog)
1084 		backlog->complete(backlog, -EINPROGRESS);
1085 
1086 	ctx = crypto_tfm_ctx(async_req->tfm);
1087 
1088 	dd->req = ahash_request_cast(async_req);
1089 	start_async = (dd->req != req);
1090 	dd->is_async = start_async;
1091 	dd->force_complete = false;
1092 
1093 	/* WARNING: ctx->start() MAY change dd->is_async. */
1094 	err = ctx->start(dd);
1095 	return (start_async) ? ret : err;
1096 }
1097 
1098 static int atmel_sha_done(struct atmel_sha_dev *dd);
1099 
1100 static int atmel_sha_start(struct atmel_sha_dev *dd)
1101 {
1102 	struct ahash_request *req = dd->req;
1103 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1104 	int err;
1105 
1106 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1107 						ctx->op, req->nbytes);
1108 
1109 	err = atmel_sha_hw_init(dd);
1110 	if (err)
1111 		return atmel_sha_complete(dd, err);
1112 
1113 	/*
1114 	 * atmel_sha_update_req() and atmel_sha_final_req() can return either:
1115 	 *  -EINPROGRESS: the hardware is busy and the SHA driver will resume
1116 	 *                its job later in the done_task.
1117 	 *                This is the main path.
1118 	 *
1119 	 * 0: the SHA driver can continue its job then release the hardware
1120 	 *    later, if needed, with atmel_sha_finish_req().
1121 	 *    This is the alternate path.
1122 	 *
1123 	 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
1124 	 *      been called, hence the hardware has been released.
1125 	 *      The SHA driver must stop its job without calling
1126 	 *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
1127 	 *      called a second time.
1128 	 *
1129 	 * Please note that currently, atmel_sha_final_req() never returns 0.
1130 	 */
1131 
1132 	dd->resume = atmel_sha_done;
1133 	if (ctx->op == SHA_OP_UPDATE) {
1134 		err = atmel_sha_update_req(dd);
1135 		if (!err && (ctx->flags & SHA_FLAGS_FINUP))
1136 			/* no final() after finup() */
1137 			err = atmel_sha_final_req(dd);
1138 	} else if (ctx->op == SHA_OP_FINAL) {
1139 		err = atmel_sha_final_req(dd);
1140 	}
1141 
1142 	if (!err)
1143 		/* done_task will not finish it, so do it here */
1144 		atmel_sha_finish_req(req, err);
1145 
1146 	dev_dbg(dd->dev, "exit, err: %d\n", err);
1147 
1148 	return err;
1149 }
1150 
1151 static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
1152 {
1153 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1154 	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1155 	struct atmel_sha_dev *dd = tctx->dd;
1156 
1157 	ctx->op = op;
1158 
1159 	return atmel_sha_handle_queue(dd, req);
1160 }
1161 
1162 static int atmel_sha_update(struct ahash_request *req)
1163 {
1164 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1165 
1166 	if (!req->nbytes)
1167 		return 0;
1168 
1169 	ctx->total = req->nbytes;
1170 	ctx->sg = req->src;
1171 	ctx->offset = 0;
1172 
1173 	if (ctx->flags & SHA_FLAGS_FINUP) {
1174 		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
1175 			/* faster to use CPU for short transfers */
1176 			ctx->flags |= SHA_FLAGS_CPU;
1177 	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
1178 		atmel_sha_append_sg(ctx);
1179 		return 0;
1180 	}
1181 	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
1182 }
1183 
1184 static int atmel_sha_final(struct ahash_request *req)
1185 {
1186 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1187 
1188 	ctx->flags |= SHA_FLAGS_FINUP;
1189 
1190 	if (ctx->flags & SHA_FLAGS_ERROR)
1191 		return 0; /* uncompleted hash is not needed */
1192 
1193 	if (ctx->flags & SHA_FLAGS_PAD)
1194 		/* copy ready hash (+ finalize hmac) */
1195 		return atmel_sha_finish(req);
1196 
1197 	return atmel_sha_enqueue(req, SHA_OP_FINAL);
1198 }
1199 
1200 static int atmel_sha_finup(struct ahash_request *req)
1201 {
1202 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1203 	int err1, err2;
1204 
1205 	ctx->flags |= SHA_FLAGS_FINUP;
1206 
1207 	err1 = atmel_sha_update(req);
1208 	if (err1 == -EINPROGRESS ||
1209 	    (err1 == -EBUSY && (ahash_request_flags(req) &
1210 				CRYPTO_TFM_REQ_MAY_BACKLOG)))
1211 		return err1;
1212 
1213 	/*
1214 	 * final() always has to be called to clean up resources,
1215 	 * even if update() failed, except when it returned -EINPROGRESS.
1216 	 */
1217 	err2 = atmel_sha_final(req);
1218 
1219 	return err1 ?: err2;
1220 }
1221 
1222 static int atmel_sha_digest(struct ahash_request *req)
1223 {
1224 	return atmel_sha_init(req) ?: atmel_sha_finup(req);
1225 }
1226 
1227 
1228 static int atmel_sha_export(struct ahash_request *req, void *out)
1229 {
1230 	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1231 
1232 	memcpy(out, ctx, sizeof(*ctx));
1233 	return 0;
1234 }
1235 
1236 static int atmel_sha_import(struct ahash_request *req, const void *in)
1237 {
1238 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1239 
1240 	memcpy(ctx, in, sizeof(*ctx));
1241 	return 0;
1242 }
1243 
1244 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
1245 {
1246 	struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
1247 
1248 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1249 				 sizeof(struct atmel_sha_reqctx));
1250 	ctx->start = atmel_sha_start;
1251 
1252 	return 0;
1253 }
1254 
1255 static void atmel_sha_alg_init(struct ahash_alg *alg)
1256 {
1257 	alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
1258 	alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
1259 	alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
1260 	alg->halg.base.cra_module = THIS_MODULE;
1261 	alg->halg.base.cra_init = atmel_sha_cra_init;
1262 
1263 	alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
1264 
1265 	alg->init = atmel_sha_init;
1266 	alg->update = atmel_sha_update;
1267 	alg->final = atmel_sha_final;
1268 	alg->finup = atmel_sha_finup;
1269 	alg->digest = atmel_sha_digest;
1270 	alg->export = atmel_sha_export;
1271 	alg->import = atmel_sha_import;
1272 }
1273 
1274 static struct ahash_alg sha_1_256_algs[] = {
1275 {
1276 	.halg.base.cra_name		= "sha1",
1277 	.halg.base.cra_driver_name	= "atmel-sha1",
1278 	.halg.base.cra_blocksize	= SHA1_BLOCK_SIZE,
1279 
1280 	.halg.digestsize = SHA1_DIGEST_SIZE,
1281 },
1282 {
1283 	.halg.base.cra_name		= "sha256",
1284 	.halg.base.cra_driver_name	= "atmel-sha256",
1285 	.halg.base.cra_blocksize	= SHA256_BLOCK_SIZE,
1286 
1287 	.halg.digestsize = SHA256_DIGEST_SIZE,
1288 },
1289 };
1290 
1291 static struct ahash_alg sha_224_alg = {
1292 	.halg.base.cra_name		= "sha224",
1293 	.halg.base.cra_driver_name	= "atmel-sha224",
1294 	.halg.base.cra_blocksize	= SHA224_BLOCK_SIZE,
1295 
1296 	.halg.digestsize = SHA224_DIGEST_SIZE,
1297 };
1298 
1299 static struct ahash_alg sha_384_512_algs[] = {
1300 {
1301 	.halg.base.cra_name		= "sha384",
1302 	.halg.base.cra_driver_name	= "atmel-sha384",
1303 	.halg.base.cra_blocksize	= SHA384_BLOCK_SIZE,
1304 	.halg.base.cra_alignmask	= 0x3,
1305 
1306 	.halg.digestsize = SHA384_DIGEST_SIZE,
1307 },
1308 {
1309 	.halg.base.cra_name		= "sha512",
1310 	.halg.base.cra_driver_name	= "atmel-sha512",
1311 	.halg.base.cra_blocksize	= SHA512_BLOCK_SIZE,
1312 	.halg.base.cra_alignmask	= 0x3,
1313 
1314 	.halg.digestsize = SHA512_DIGEST_SIZE,
1315 },
1316 };
1317 
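/*
 * Usage sketch (illustrative only, not part of the driver): once these
 * ahash algorithms are registered, a caller reaches them through the
 * generic crypto API.  Here, data/datalen stand for the caller's buffer:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 result[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, result, datalen);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */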
1318 static void atmel_sha_queue_task(unsigned long data)
1319 {
1320 	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1321 
1322 	atmel_sha_handle_queue(dd, NULL);
1323 }
1324 
1325 static int atmel_sha_done(struct atmel_sha_dev *dd)
1326 {
1327 	int err = 0;
1328 
1329 	if (SHA_FLAGS_CPU & dd->flags) {
1330 		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1331 			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1332 			goto finish;
1333 		}
1334 	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
1335 		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1336 			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1337 			atmel_sha_update_dma_stop(dd);
1338 		}
1339 		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1340 			/* hash or semi-hash ready */
1341 			dd->flags &= ~(SHA_FLAGS_DMA_READY |
1342 						SHA_FLAGS_OUTPUT_READY);
1343 			err = atmel_sha_update_dma_start(dd);
1344 			if (err != -EINPROGRESS)
1345 				goto finish;
1346 		}
1347 	}
1348 	return err;
1349 
1350 finish:
1351 	/* finish the current request */
1352 	atmel_sha_finish_req(dd->req, err);
1353 
1354 	return err;
1355 }
1356 
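/*
 * Tasklet bottom half of atmel_sha_irq(): resumes whichever step of the
 * state machine registered itself in dd->resume.
 */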
1357 static void atmel_sha_done_task(unsigned long data)
1358 {
1359 	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1360 
1361 	dd->is_async = true;
1362 	(void)dd->resume(dd);
1363 }
1364 
1365 static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
1366 {
1367 	struct atmel_sha_dev *sha_dd = dev_id;
1368 	u32 reg;
1369 
1370 	reg = atmel_sha_read(sha_dd, SHA_ISR);
1371 	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
1372 		atmel_sha_write(sha_dd, SHA_IDR, reg);
1373 		if (SHA_FLAGS_BUSY & sha_dd->flags) {
1374 			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
1375 			if (!(SHA_FLAGS_CPU & sha_dd->flags))
1376 				sha_dd->flags |= SHA_FLAGS_DMA_READY;
1377 			tasklet_schedule(&sha_dd->done_task);
1378 		} else {
1379 			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
1380 		}
1381 		return IRQ_HANDLED;
1382 	}
1383 
1384 	return IRQ_NONE;
1385 }
1386 
1387 
1388 /* DMA transfer functions */
1389 
1390 static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
1391 					struct scatterlist *sg,
1392 					size_t len)
1393 {
1394 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
1395 	struct ahash_request *req = dd->req;
1396 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1397 	size_t bs = ctx->block_size;
1398 	int nents;
1399 
1400 	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
1401 		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
1402 			return false;
1403 
1404 		/*
1405 		 * This is the last sg, the only one that is allowed to
1406 		 * have an unaligned length.
1407 		 */
1408 		if (len <= sg->length) {
1409 			dma->nents = nents + 1;
1410 			dma->last_sg_length = sg->length;
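			/*
			 * Round the last sg length up to a 32-bit boundary
			 * for the transfer; atmel_sha_dma_callback2()
			 * restores the saved length afterwards.
			 */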
1411 			sg->length = ALIGN(len, sizeof(u32));
1412 			return true;
1413 		}
1414 
1415 		/* All other sg lengths MUST be aligned to the block size. */
1416 		if (!IS_ALIGNED(sg->length, bs))
1417 			return false;
1418 
1419 		len -= sg->length;
1420 	}
1421 
1422 	return false;
1423 }
1424 
1425 static void atmel_sha_dma_callback2(void *data)
1426 {
1427 	struct atmel_sha_dev *dd = data;
1428 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
1429 	struct scatterlist *sg;
1430 	int nents;
1431 
1432 	dmaengine_terminate_all(dma->chan);
1433 	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1434 
1435 	sg = dma->sg;
1436 	for (nents = 0; nents < dma->nents - 1; ++nents)
1437 		sg = sg_next(sg);
1438 	sg->length = dma->last_sg_length;
1439 
1440 	dd->is_async = true;
1441 	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
1442 }
1443 
1444 static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
1445 			       struct scatterlist *src,
1446 			       size_t len,
1447 			       atmel_sha_fn_t resume)
1448 {
1449 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
1450 	struct dma_slave_config *config = &dma->dma_conf;
1451 	struct dma_chan *chan = dma->chan;
1452 	struct dma_async_tx_descriptor *desc;
1453 	dma_cookie_t cookie;
1454 	unsigned int sg_len;
1455 	int err;
1456 
1457 	dd->resume = resume;
1458 
1459 	/*
1460 	 * dma->nents has already been initialized by
1461 	 * atmel_sha_dma_check_aligned().
1462 	 */
1463 	dma->sg = src;
1464 	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1465 	if (!sg_len) {
1466 		err = -ENOMEM;
1467 		goto exit;
1468 	}
1469 
1470 	config->src_maxburst = 16;
1471 	config->dst_maxburst = 16;
1472 	err = dmaengine_slave_config(chan, config);
1473 	if (err)
1474 		goto unmap_sg;
1475 
1476 	desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
1477 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1478 	if (!desc) {
1479 		err = -ENOMEM;
1480 		goto unmap_sg;
1481 	}
1482 
1483 	desc->callback = atmel_sha_dma_callback2;
1484 	desc->callback_param = dd;
1485 	cookie = dmaengine_submit(desc);
1486 	err = dma_submit_error(cookie);
1487 	if (err)
1488 		goto unmap_sg;
1489 
1490 	dma_async_issue_pending(chan);
1491 
1492 	return -EINPROGRESS;
1493 
1494 unmap_sg:
1495 	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1496 exit:
1497 	return atmel_sha_complete(dd, err);
1498 }
1499 
1500 
1501 /* CPU transfer functions */
1502 
1503 static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
1504 {
1505 	struct ahash_request *req = dd->req;
1506 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1507 	const u32 *words = (const u32 *)ctx->buffer;
1508 	size_t i, num_words;
1509 	u32 isr, din, din_inc;
1510 
1511 	din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
1512 	for (;;) {
1513 		/* Write data into the Input Data Registers. */
1514 		num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
1515 		for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
1516 			atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
1517 
1518 		ctx->offset += ctx->bufcnt;
1519 		ctx->total -= ctx->bufcnt;
1520 
1521 		if (!ctx->total)
1522 			break;
1523 
1524 		/*
1525 		 * Prepare next block:
1526 		 * Fill ctx->buffer now with the next data to be written into
1527 		 * IDATARx: it gives time for the SHA hardware to process
1528 		 * the current data so the SHA_INT_DATARDY flag might be set
1529 		 * in SHA_ISR when polling this register at the beginning of
1530 		 * the next loop.
1531 		 */
1532 		ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1533 		scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1534 					 ctx->offset, ctx->bufcnt, 0);
1535 
1536 		/* Wait for hardware to be ready again. */
1537 		isr = atmel_sha_read(dd, SHA_ISR);
1538 		if (!(isr & SHA_INT_DATARDY)) {
1539 			/* Not ready yet. */
1540 			dd->resume = atmel_sha_cpu_transfer;
1541 			atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
1542 			return -EINPROGRESS;
1543 		}
1544 	}
1545 
1546 	if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
1547 		return dd->cpu_transfer_complete(dd);
1548 
1549 	return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
1550 }
1551 
1552 static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
1553 			       struct scatterlist *sg,
1554 			       unsigned int len,
1555 			       bool idatar0_only,
1556 			       bool wait_data_ready,
1557 			       atmel_sha_fn_t resume)
1558 {
1559 	struct ahash_request *req = dd->req;
1560 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1561 
1562 	if (!len)
1563 		return resume(dd);
1564 
1565 	ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
1566 
1567 	if (idatar0_only)
1568 		ctx->flags |= SHA_FLAGS_IDATAR0;
1569 
1570 	if (wait_data_ready)
1571 		ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
1572 
1573 	ctx->sg = sg;
1574 	ctx->total = len;
1575 	ctx->offset = 0;
1576 
1577 	/* Prepare the first block to be written. */
1578 	ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1579 	scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1580 				 ctx->offset, ctx->bufcnt, 0);
1581 
1582 	dd->cpu_transfer_complete = resume;
1583 	return atmel_sha_cpu_transfer(dd);
1584 }
1585 
1586 static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
1587 			      const void *data, unsigned int datalen,
1588 			      bool auto_padding,
1589 			      atmel_sha_fn_t resume)
1590 {
1591 	struct ahash_request *req = dd->req;
1592 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1593 	u32 msglen = (auto_padding) ? datalen : 0;
1594 	u32 mr = SHA_MR_MODE_AUTO;
1595 
1596 	if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
1597 		return atmel_sha_complete(dd, -EINVAL);
1598 
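	/*
	 * Writing the real message length into SHA_MSR/SHA_BCR enables the
	 * hardware automatic padding; a zero length (auto_padding == false)
	 * leaves padding to the caller, hence the alignment check above.
	 */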
1599 	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1600 	atmel_sha_write(dd, SHA_MR, mr);
1601 	atmel_sha_write(dd, SHA_MSR, msglen);
1602 	atmel_sha_write(dd, SHA_BCR, msglen);
1603 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1604 
1605 	sg_init_one(&dd->tmp, data, datalen);
1606 	return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
1607 }
1608 
1609 
1610 /* hmac functions */
1611 
1612 struct atmel_sha_hmac_key {
1613 	bool			valid;
1614 	unsigned int		keylen;
1615 	u8			buffer[SHA512_BLOCK_SIZE];
1616 	u8			*keydup;
1617 };
1618 
1619 static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
1620 {
1621 	memset(hkey, 0, sizeof(*hkey));
1622 }
1623 
1624 static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
1625 {
1626 	kfree(hkey->keydup);
1627 	memset(hkey, 0, sizeof(*hkey));
1628 }
1629 
1630 static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
1631 					 const u8 *key,
1632 					 unsigned int keylen)
1633 {
1634 	atmel_sha_hmac_key_release(hkey);
1635 
1636 	if (keylen > sizeof(hkey->buffer)) {
1637 		hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
1638 		if (!hkey->keydup)
1639 			return -ENOMEM;
1640 
1641 	} else {
1642 		memcpy(hkey->buffer, key, keylen);
1643 	}
1644 
1645 	hkey->valid = true;
1646 	hkey->keylen = keylen;
1647 	return 0;
1648 }
1649 
1650 static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
1651 					  const u8 **key,
1652 					  unsigned int *keylen)
1653 {
1654 	if (!hkey->valid)
1655 		return false;
1656 
1657 	*keylen = hkey->keylen;
1658 	*key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
1659 	return true;
1660 }
1661 
1662 
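/*
 * HMAC(K, m) = H((K' ^ opad) | H((K' ^ ipad) | m)).  The ipad/opad arrays
 * below first hold K' ^ ipad and K' ^ opad, then are reused to cache the
 * corresponding precomputed intermediate digests.
 */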
1663 struct atmel_sha_hmac_ctx {
1664 	struct atmel_sha_ctx	base;
1665 
1666 	struct atmel_sha_hmac_key	hkey;
1667 	u32			ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
1668 	u32			opad[SHA512_BLOCK_SIZE / sizeof(u32)];
1669 	atmel_sha_fn_t		resume;
1670 };
1671 
1672 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1673 				atmel_sha_fn_t resume);
1674 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1675 				      const u8 *key, unsigned int keylen);
1676 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
1677 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
1678 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
1679 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
1680 
1681 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
1682 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
1683 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
1684 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
1685 
1686 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1687 				atmel_sha_fn_t resume)
1688 {
1689 	struct ahash_request *req = dd->req;
1690 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1691 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1692 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1693 	unsigned int keylen;
1694 	const u8 *key;
1695 	size_t bs;
1696 
1697 	hmac->resume = resume;
1698 	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
1699 	case SHA_FLAGS_SHA1:
1700 		ctx->block_size = SHA1_BLOCK_SIZE;
1701 		ctx->hash_size = SHA1_DIGEST_SIZE;
1702 		break;
1703 
1704 	case SHA_FLAGS_SHA224:
1705 		ctx->block_size = SHA224_BLOCK_SIZE;
1706 		ctx->hash_size = SHA256_DIGEST_SIZE;
1707 		break;
1708 
1709 	case SHA_FLAGS_SHA256:
1710 		ctx->block_size = SHA256_BLOCK_SIZE;
1711 		ctx->hash_size = SHA256_DIGEST_SIZE;
1712 		break;
1713 
1714 	case SHA_FLAGS_SHA384:
1715 		ctx->block_size = SHA384_BLOCK_SIZE;
1716 		ctx->hash_size = SHA512_DIGEST_SIZE;
1717 		break;
1718 
1719 	case SHA_FLAGS_SHA512:
1720 		ctx->block_size = SHA512_BLOCK_SIZE;
1721 		ctx->hash_size = SHA512_DIGEST_SIZE;
1722 		break;
1723 
1724 	default:
1725 		return atmel_sha_complete(dd, -EINVAL);
1726 	}
1727 	bs = ctx->block_size;
1728 
1729 	if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
1730 		return resume(dd);
1731 
1732 	/* Compute K' from K. */
1733 	if (unlikely(keylen > bs))
1734 		return atmel_sha_hmac_prehash_key(dd, key, keylen);
1735 
1736 	/* Prepare ipad. */
1737 	memcpy((u8 *)hmac->ipad, key, keylen);
1738 	memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
1739 	return atmel_sha_hmac_compute_ipad_hash(dd);
1740 }
1741 
1742 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1743 				      const u8 *key, unsigned int keylen)
1744 {
1745 	return atmel_sha_cpu_hash(dd, key, keylen, true,
1746 				  atmel_sha_hmac_prehash_key_done);
1747 }
1748 
1749 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
1750 {
1751 	struct ahash_request *req = dd->req;
1752 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1753 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1754 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1755 	size_t ds = crypto_ahash_digestsize(tfm);
1756 	size_t bs = ctx->block_size;
1757 	size_t i, num_words = ds / sizeof(u32);
1758 
1759 	/* Prepare ipad. */
1760 	for (i = 0; i < num_words; ++i)
1761 		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1762 	memset((u8 *)hmac->ipad + ds, 0, bs - ds);
1763 	return atmel_sha_hmac_compute_ipad_hash(dd);
1764 }
1765 
1766 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
1767 {
1768 	struct ahash_request *req = dd->req;
1769 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1770 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1771 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1772 	size_t bs = ctx->block_size;
1773 	size_t i, num_words = bs / sizeof(u32);
1774 
1775 	memcpy(hmac->opad, hmac->ipad, bs);
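	/* 0x36 and 0x5c are the standard HMAC ipad/opad bytes (RFC 2104). */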
1776 	for (i = 0; i < num_words; ++i) {
1777 		hmac->ipad[i] ^= 0x36363636;
1778 		hmac->opad[i] ^= 0x5c5c5c5c;
1779 	}
1780 
1781 	return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
1782 				  atmel_sha_hmac_compute_opad_hash);
1783 }
1784 
1785 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
1786 {
1787 	struct ahash_request *req = dd->req;
1788 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1789 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1790 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1791 	size_t bs = ctx->block_size;
1792 	size_t hs = ctx->hash_size;
1793 	size_t i, num_words = hs / sizeof(u32);
1794 
1795 	for (i = 0; i < num_words; ++i)
1796 		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1797 	return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
1798 				  atmel_sha_hmac_setup_done);
1799 }
1800 
1801 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
1802 {
1803 	struct ahash_request *req = dd->req;
1804 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1805 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1806 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1807 	size_t hs = ctx->hash_size;
1808 	size_t i, num_words = hs / sizeof(u32);
1809 
1810 	for (i = 0; i < num_words; ++i)
1811 		hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1812 	atmel_sha_hmac_key_release(&hmac->hkey);
1813 	return hmac->resume(dd);
1814 }
1815 
1816 static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
1817 {
1818 	struct ahash_request *req = dd->req;
1819 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1820 	int err;
1821 
1822 	err = atmel_sha_hw_init(dd);
1823 	if (err)
1824 		return atmel_sha_complete(dd, err);
1825 
1826 	switch (ctx->op) {
1827 	case SHA_OP_INIT:
1828 		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
1829 		break;
1830 
1831 	case SHA_OP_UPDATE:
1832 		dd->resume = atmel_sha_done;
1833 		err = atmel_sha_update_req(dd);
1834 		break;
1835 
1836 	case SHA_OP_FINAL:
1837 		dd->resume = atmel_sha_hmac_final;
1838 		err = atmel_sha_final_req(dd);
1839 		break;
1840 
1841 	case SHA_OP_DIGEST:
1842 		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
1843 		break;
1844 
1845 	default:
1846 		return atmel_sha_complete(dd, -EINVAL);
1847 	}
1848 
1849 	return err;
1850 }
1851 
1852 static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1853 				 unsigned int keylen)
1854 {
1855 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1856 
1857 	if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
1858 		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1859 		return -EINVAL;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 static int atmel_sha_hmac_init(struct ahash_request *req)
1866 {
1867 	int err;
1868 
1869 	err = atmel_sha_init(req);
1870 	if (err)
1871 		return err;
1872 
1873 	return atmel_sha_enqueue(req, SHA_OP_INIT);
1874 }
1875 
1876 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
1877 {
1878 	struct ahash_request *req = dd->req;
1879 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1880 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1881 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1882 	size_t bs = ctx->block_size;
1883 	size_t hs = ctx->hash_size;
1884 
1885 	ctx->bufcnt = 0;
1886 	ctx->digcnt[0] = bs;
1887 	ctx->digcnt[1] = 0;
1888 	ctx->flags |= SHA_FLAGS_RESTORE;
1889 	memcpy(ctx->digest, hmac->ipad, hs);
1890 	return atmel_sha_complete(dd, 0);
1891 }
1892 
1893 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
1894 {
1895 	struct ahash_request *req = dd->req;
1896 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1897 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1898 	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1899 	u32 *digest = (u32 *)ctx->digest;
1900 	size_t ds = crypto_ahash_digestsize(tfm);
1901 	size_t bs = ctx->block_size;
1902 	size_t hs = ctx->hash_size;
1903 	size_t i, num_words;
1904 	u32 mr;
1905 
1906 	/* Save d = SHA((K' + ipad) | msg). */
1907 	num_words = ds / sizeof(u32);
1908 	for (i = 0; i < num_words; ++i)
1909 		digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1910 
1911 	/* Restore context to finish computing SHA((K' + opad) | d). */
1912 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
1913 	num_words = hs / sizeof(u32);
1914 	for (i = 0; i < num_words; ++i)
1915 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
1916 
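	/*
	 * The outer hash input is one key block (bs bytes, preloaded above
	 * via the UIHV) followed by the ds-byte inner digest, hence
	 * MSR = bs + ds while only ds bytes go through IDATAR.
	 */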
1917 	mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
1918 	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1919 	atmel_sha_write(dd, SHA_MR, mr);
1920 	atmel_sha_write(dd, SHA_MSR, bs + ds);
1921 	atmel_sha_write(dd, SHA_BCR, ds);
1922 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1923 
1924 	sg_init_one(&dd->tmp, digest, ds);
1925 	return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
1926 				   atmel_sha_hmac_final_done);
1927 }

static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
{
	/*
	 * req->result might not be sizeof(u32) aligned, so copy the
	 * digest into ctx->digest[] before memcpy() the data into
	 * req->result.
	 */
	atmel_sha_copy_hash(dd->req);
	atmel_sha_copy_ready_hash(dd->req);
	return atmel_sha_complete(dd, 0);
}

static int atmel_sha_hmac_digest(struct ahash_request *req)
{
	int err;

	err = atmel_sha_init(req);
	if (err)
		return err;

	return atmel_sha_enqueue(req, SHA_OP_DIGEST);
}

static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	size_t hs = ctx->hash_size;
	size_t i, num_words = hs / sizeof(u32);
	bool use_dma = false;
	u32 mr;

	/* Special case for empty message. */
	if (!req->nbytes)
		return atmel_sha_complete(dd, -EINVAL); // TODO:

	/* Check DMA threshold and alignment. */
	if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
	    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
		use_dma = true;

	/* Write both initial hash values to compute a HMAC. */
	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

	/* Write the Mode, Message Size, Bytes Count then Control Registers. */
	mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
	mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
	if (use_dma)
		mr |= SHA_MR_MODE_IDATAR0;
	else
		mr |= SHA_MR_MODE_AUTO;
	atmel_sha_write(dd, SHA_MR, mr);

	atmel_sha_write(dd, SHA_MSR, req->nbytes);
	atmel_sha_write(dd, SHA_BCR, req->nbytes);

	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	/* Process data. */
	if (use_dma)
		return atmel_sha_dma_start(dd, req->src, req->nbytes,
					   atmel_sha_hmac_final_done);

	return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
				   atmel_sha_hmac_final_done);
}
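
/*
 * Unlike the init/update/final path above, this single-shot path relies
 * on the IP's dedicated HMAC mode (SHA_MR_HMAC): both precomputed states
 * are loaded (WUIHV for the inner hash, WUIEHV for the outer one) and
 * the hardware chains the two passes by itself, so only the raw message
 * has to be fed in: by DMA when it is large and suitably aligned, by CPU
 * otherwise.
 */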

static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));
	hmac->base.start = atmel_sha_hmac_start;
	atmel_sha_hmac_key_init(&hmac->hkey);

	return 0;
}

static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

	atmel_sha_hmac_key_release(&hmac->hkey);
}

static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
{
	alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
	alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
	alg->halg.base.cra_module = THIS_MODULE;
	alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
	alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;

	alg->halg.statesize = sizeof(struct atmel_sha_reqctx);

	alg->init = atmel_sha_hmac_init;
	alg->update = atmel_sha_update;
	alg->final = atmel_sha_final;
	alg->digest = atmel_sha_hmac_digest;
	alg->setkey = atmel_sha_hmac_setkey;
	alg->export = atmel_sha_export;
	alg->import = atmel_sha_import;
}

static struct ahash_alg sha_hmac_algs[] = {
{
	.halg.base.cra_name		= "hmac(sha1)",
	.halg.base.cra_driver_name	= "atmel-hmac-sha1",
	.halg.base.cra_blocksize	= SHA1_BLOCK_SIZE,

	.halg.digestsize = SHA1_DIGEST_SIZE,
},
{
	.halg.base.cra_name		= "hmac(sha224)",
	.halg.base.cra_driver_name	= "atmel-hmac-sha224",
	.halg.base.cra_blocksize	= SHA224_BLOCK_SIZE,

	.halg.digestsize = SHA224_DIGEST_SIZE,
},
{
	.halg.base.cra_name		= "hmac(sha256)",
	.halg.base.cra_driver_name	= "atmel-hmac-sha256",
	.halg.base.cra_blocksize	= SHA256_BLOCK_SIZE,

	.halg.digestsize = SHA256_DIGEST_SIZE,
},
{
	.halg.base.cra_name		= "hmac(sha384)",
	.halg.base.cra_driver_name	= "atmel-hmac-sha384",
	.halg.base.cra_blocksize	= SHA384_BLOCK_SIZE,

	.halg.digestsize = SHA384_DIGEST_SIZE,
},
{
	.halg.base.cra_name		= "hmac(sha512)",
	.halg.base.cra_driver_name	= "atmel-hmac-sha512",
	.halg.base.cra_blocksize	= SHA512_BLOCK_SIZE,

	.halg.digestsize = SHA512_DIGEST_SIZE,
},
};
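
/*
 * A hedged sketch of how a kernel user reaches one of these algorithms
 * through the generic ahash API ("hmac(sha256)" resolves to
 * atmel-hmac-sha256 whenever this driver wins on cra_priority; error
 * handling trimmed for brevity):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, datalen);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */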

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc functions */

static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);

struct atmel_sha_authenc_ctx {
	struct crypto_ahash	*tfm;
};

struct atmel_sha_authenc_reqctx {
	struct atmel_sha_reqctx	base;

	atmel_aes_authenc_fn_t	cb;
	struct atmel_aes_dev	*aes_dev;

	/* _init() parameters. */
	struct scatterlist	*assoc;
	u32			assoclen;
	u32			textlen;

	/* _final() parameters. */
	u32			*digest;
	unsigned int		digestlen;
};

static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
				       int err)
{
	struct ahash_request *req = areq->data;
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);

	authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
}

static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	int err;

	/*
	 * Force atmel_sha_complete() to call req->base.complete(), i.e.,
	 * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
	 */
	dd->force_complete = true;

	err = atmel_sha_hw_init(dd);
	return authctx->cb(authctx->aes_dev, err, dd->is_async);
}

bool atmel_sha_authenc_is_ready(void)
{
	struct atmel_sha_ctx dummy;

	dummy.dd = NULL;
	return (atmel_sha_find_dev(&dummy) != NULL);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);

unsigned int atmel_sha_authenc_get_reqsize(void)
{
	return sizeof(struct atmel_sha_authenc_reqctx);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
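
/*
 * The AES driver is expected to fold this size into its own request
 * context so the ahash request can live inside the aead request,
 * something like (the aes-side context name is an assumption):
 *
 *	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_authenc_reqctx) +
 *				     atmel_sha_authenc_get_reqsize());
 */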

struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
{
	struct atmel_sha_authenc_ctx *auth;
	struct crypto_ahash *tfm;
	struct atmel_sha_ctx *tctx;
	const char *name;
	int err = -EINVAL;

	switch (mode & SHA_FLAGS_MODE_MASK) {
	case SHA_FLAGS_HMAC_SHA1:
		name = "atmel-hmac-sha1";
		break;

	case SHA_FLAGS_HMAC_SHA224:
		name = "atmel-hmac-sha224";
		break;

	case SHA_FLAGS_HMAC_SHA256:
		name = "atmel-hmac-sha256";
		break;

	case SHA_FLAGS_HMAC_SHA384:
		name = "atmel-hmac-sha384";
		break;

	case SHA_FLAGS_HMAC_SHA512:
		name = "atmel-hmac-sha512";
		break;

	default:
		goto error;
	}

	tfm = crypto_alloc_ahash(name, 0, 0);
	if (IS_ERR(tfm)) {
		err = PTR_ERR(tfm);
		goto error;
	}
	tctx = crypto_ahash_ctx(tfm);
	tctx->start = atmel_sha_authenc_start;
	tctx->flags = mode;

	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
	if (!auth) {
		err = -ENOMEM;
		goto err_free_ahash;
	}
	auth->tfm = tfm;

	return auth;

err_free_ahash:
	crypto_free_ahash(tfm);
error:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
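
/*
 * Intended use from the atmel-aes authenc tfm init path (a sketch; the
 * aes-side context layout is an assumption):
 *
 *	auth = atmel_sha_authenc_spawn(SHA_FLAGS_HMAC_SHA256);
 *	if (IS_ERR(auth))
 *		return PTR_ERR(auth);
 *	ctx->auth = auth;
 *
 * with a matching atmel_sha_authenc_free(ctx->auth) in the exit path.
 */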

void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
{
	if (auth)
		crypto_free_ahash(auth->tfm);
	kfree(auth);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);

int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
			     const u8 *key, unsigned int keylen,
			     u32 *flags)
{
	struct crypto_ahash *tfm = auth->tfm;
	int err;

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(tfm, key, keylen);
	*flags = crypto_ahash_get_flags(tfm);

	return err;
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);

int atmel_sha_authenc_schedule(struct ahash_request *req,
			       struct atmel_sha_authenc_ctx *auth,
			       atmel_aes_authenc_fn_t cb,
			       struct atmel_aes_dev *aes_dev)
{
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	struct atmel_sha_reqctx *ctx = &authctx->base;
	struct crypto_ahash *tfm = auth->tfm;
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_dev *dd;

	/* Reset request context (MUST be done first). */
	memset(authctx, 0, sizeof(*authctx));

	/* Get SHA device. */
	dd = atmel_sha_find_dev(tctx);
	if (!dd)
		return cb(aes_dev, -ENODEV, false);

	/* Init request context. */
	ctx->dd = dd;
	ctx->buflen = SHA_BUFFER_LEN;
	authctx->cb = cb;
	authctx->aes_dev = aes_dev;
	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);

	return atmel_sha_handle_queue(dd, req);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
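
/*
 * The exported authenc entry points are meant to be called from the AES
 * driver's completion chain in a fixed order: schedule() to win the SHA
 * queue, init() to hash the associated data, then, once the text has
 * been streamed through IDATAR0, final() (or abort() on error) to
 * collect the tag. Each step reports back through the supplied
 * atmel_aes_authenc_fn_t callback rather than through
 * req->base.complete().
 */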

int atmel_sha_authenc_init(struct ahash_request *req,
			   struct scatterlist *assoc, unsigned int assoclen,
			   unsigned int textlen,
			   atmel_aes_authenc_fn_t cb,
			   struct atmel_aes_dev *aes_dev)
{
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	struct atmel_sha_reqctx *ctx = &authctx->base;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_dev *dd = ctx->dd;

	if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
		return atmel_sha_complete(dd, -EINVAL);

	authctx->cb = cb;
	authctx->aes_dev = aes_dev;
	authctx->assoc = assoc;
	authctx->assoclen = assoclen;
	authctx->textlen = textlen;

	ctx->flags = hmac->base.flags;
	return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);

static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	struct atmel_sha_reqctx *ctx = &authctx->base;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	size_t hs = ctx->hash_size;
	size_t i, num_words = hs / sizeof(u32);
	u32 mr, msg_size;

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

	mr = (SHA_MR_MODE_IDATAR0 |
	      SHA_MR_HMAC |
	      SHA_MR_DUALBUFF);
	mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
	atmel_sha_write(dd, SHA_MR, mr);

	msg_size = authctx->assoclen + authctx->textlen;
	atmel_sha_write(dd, SHA_MSR, msg_size);
	atmel_sha_write(dd, SHA_BCR, msg_size);

	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	/* Process assoc data. */
	return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
				   true, false,
				   atmel_sha_authenc_init_done);
}

static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);

	return authctx->cb(authctx->aes_dev, 0, dd->is_async);
}

int atmel_sha_authenc_final(struct ahash_request *req,
			    u32 *digest, unsigned int digestlen,
			    atmel_aes_authenc_fn_t cb,
			    struct atmel_aes_dev *aes_dev)
{
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	struct atmel_sha_reqctx *ctx = &authctx->base;
	struct atmel_sha_dev *dd = ctx->dd;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		authctx->digestlen = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		authctx->digestlen = SHA224_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		authctx->digestlen = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		authctx->digestlen = SHA384_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		authctx->digestlen = SHA512_DIGEST_SIZE;
		break;

	default:
		return atmel_sha_complete(dd, -EINVAL);
	}
	if (authctx->digestlen > digestlen)
		authctx->digestlen = digestlen;

	authctx->cb = cb;
	authctx->aes_dev = aes_dev;
	authctx->digest = digest;
	return atmel_sha_wait_for_data_ready(dd,
					     atmel_sha_authenc_final_done);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);

static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	size_t i, num_words = authctx->digestlen / sizeof(u32);

	for (i = 0; i < num_words; ++i)
		authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));

	return atmel_sha_complete(dd, 0);
}

void atmel_sha_authenc_abort(struct ahash_request *req)
{
	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
	struct atmel_sha_reqctx *ctx = &authctx->base;
	struct atmel_sha_dev *dd = ctx->dd;

	/* Prevent atmel_sha_complete() from calling req->base.complete(). */
	dd->is_async = false;
	dd->force_complete = false;
	(void)atmel_sha_complete(dd, 0);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);

#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */

static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	if (dd->caps.has_hmac)
		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
			crypto_unregister_ahash(&sha_hmac_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		atmel_sha_alg_init(&sha_1_256_algs[i]);

		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		atmel_sha_alg_init(&sha_224_alg);

		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			atmel_sha_alg_init(&sha_384_512_algs[i]);

			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	if (dd->caps.has_hmac) {
		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
			atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);

			err = crypto_register_ahash(&sha_hmac_algs[i]);
			if (err)
				goto err_sha_hmac_algs;
		}
	}

	return 0;

	/*i = ARRAY_SIZE(sha_hmac_algs);*/
err_sha_hmac_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_hmac_algs[j]);
	i = ARRAY_SIZE(sha_384_512_algs);
err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}
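
/*
 * The labels above form the usual goto-based unwind ladder: each one
 * unregisters whatever the stages before it managed to register, reusing
 * i as "how far the failing array got" and falling through the earlier
 * labels with i reset to the full array size (the commented-out
 * assignment documents the value i already holds at err_sha_hmac_algs).
 * The ladder implicitly assumes that a device exposing HMAC also had the
 * SHA-224 and SHA-384/512 algorithms registered, which holds for the
 * capability table in atmel_sha_get_cap() below.
 */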

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
			      struct crypto_platform_data *pdata)
{
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		int ret = PTR_ERR(dd->dma_lch_in.chan);

		if (ret != -EPROBE_DEFER)
			dev_warn(dd->dev, "no DMA channel available\n");
		return ret;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}
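
/*
 * A minimal sketch of how this channel is then driven for a hash
 * transfer, mirroring what atmel_sha_dma_start() is expected to do
 * (error handling trimmed; the callback name is an assumption):
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(dd->dma_lch_in.chan,
 *			       &dd->dma_lch_in.dma_conf);
 *	desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = atmel_sha_dma_callback;
 *	desc->callback_param = dd;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(dd->dma_lch_in.chan);
 *
 * The 4-byte bus width matches SHA_REG_DIN(0), which the peripheral
 * exposes as a single 32-bit input register.
 */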

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}

static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;
	dd->caps.has_hmac = 0;

	/* Keep only the major version number. */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		dd->caps.has_hmac = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
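
/*
 * An illustrative device tree node matching the id table above (the
 * address, interrupt and DMA specifiers are made up; the clock has to be
 * named "sha_clk" to satisfy devm_clk_get() in the probe function, and
 * the DMA channel "tx" to satisfy atmel_sha_dma_init()):
 *
 *	sha@f8034000 {
 *		compatible = "atmel,at91sam9g46-sha";
 *		reg = <0xf8034000 0x100>;
 *		interrupts = <42 IRQ_TYPE_LEVEL_HIGH 0>;
 *		clocks = <&sha_clk>;
 *		clock-names = "sha_clk";
 *		dmas = <&dma0 2 17>;
 *		dma-names = "tx";
 *	};
 */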

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (!sha_dd)
		return -ENOMEM;

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
		     (unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
		     (unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	/* Get the base address. */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err_tasklet_kill;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ. */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		err = sha_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto err_tasklet_kill;
	}

	/* Initialize the clock. */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto err_tasklet_kill;
	}

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto err_tasklet_kill;
	}

	err = clk_prepare(sha_dd->iclk);
	if (err)
		goto err_tasklet_kill;

	err = atmel_sha_hw_version_init(sha_dd);
	if (err)
		goto err_iclk_unprepare;

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto err_iclk_unprepare;
			}
		}

		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_iclk_unprepare;

		dev_info(dev, "using %s for DMA transfers\n",
			 dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
		 sha_dd->caps.has_sha224 ? "/SHA224" : "",
		 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_iclk_unprepare:
	clk_unprepare(sha_dd->iclk);
err_tasklet_kill:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	clk_unprepare(sha_dd->iclk);

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");