xref: /openbmc/linux/drivers/crypto/omap-sham.c (revision 4e74eeb2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for OMAP SHA1/MD5 HW acceleration.
6  *
7  * Copyright (c) 2010 Nokia Corporation
8  * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
9  * Copyright (c) 2011 Texas Instruments Incorporated
10  *
11  * Some ideas are from old omap-sha1-md5.c driver.
12  */
13 
14 #define pr_fmt(fmt) "%s: " fmt, __func__
15 
16 #include <linux/err.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/interrupt.h>
22 #include <linux/kernel.h>
23 #include <linux/irq.h>
24 #include <linux/io.h>
25 #include <linux/platform_device.h>
26 #include <linux/scatterlist.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/dmaengine.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_address.h>
33 #include <linux/of_irq.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/sha.h>
39 #include <crypto/hash.h>
40 #include <crypto/hmac.h>
41 #include <crypto/internal/hash.h>
42 
43 #define MD5_DIGEST_SIZE			16
44 
45 #define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
46 #define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
47 #define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
48 
49 #define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))
50 
51 #define SHA_REG_CTRL			0x18
52 #define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
53 #define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
54 #define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
55 #define SHA_REG_CTRL_ALGO		(1 << 2)
56 #define SHA_REG_CTRL_INPUT_READY	(1 << 1)
57 #define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
58 
59 #define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
60 
61 #define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
62 #define SHA_REG_MASK_DMA_EN		(1 << 3)
63 #define SHA_REG_MASK_IT_EN		(1 << 2)
64 #define SHA_REG_MASK_SOFTRESET		(1 << 1)
65 #define SHA_REG_AUTOIDLE		(1 << 0)
66 
67 #define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
68 #define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
69 
70 #define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
71 #define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
72 #define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
73 #define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
74 #define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
75 
76 #define SHA_REG_MODE_ALGO_MASK		(7 << 0)
77 #define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
78 #define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
79 #define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
80 #define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
81 #define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
82 #define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
83 
84 #define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
85 
86 #define SHA_REG_IRQSTATUS		0x118
87 #define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
88 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
89 #define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
90 #define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
91 
92 #define SHA_REG_IRQENA			0x11C
93 #define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
94 #define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
95 #define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
96 #define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
97 
98 #define DEFAULT_TIMEOUT_INTERVAL	HZ
99 
100 #define DEFAULT_AUTOSUSPEND_DELAY	1000
101 
102 /* mostly device flags */
103 #define FLAGS_BUSY		0
104 #define FLAGS_FINAL		1
105 #define FLAGS_DMA_ACTIVE	2
106 #define FLAGS_OUTPUT_READY	3
107 #define FLAGS_INIT		4
108 #define FLAGS_CPU		5
109 #define FLAGS_DMA_READY		6
110 #define FLAGS_AUTO_XOR		7
111 #define FLAGS_BE32_SHA1		8
112 #define FLAGS_SGS_COPIED	9
113 #define FLAGS_SGS_ALLOCED	10
114 #define FLAGS_HUGE		11
115 
116 /* context flags */
117 #define FLAGS_FINUP		16
118 
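/*
 * The algorithm is stored in the context flags using the same encoding
 * as the SHA_REG_MODE register, shifted up by FLAGS_MODE_SHIFT, so
 * omap_sham_write_ctrl_omap4() can derive the register value with a
 * plain shift.
 */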
119 #define FLAGS_MODE_SHIFT	18
120 #define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
121 #define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
122 #define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
123 #define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
124 #define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
125 #define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
126 #define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
127 
128 #define FLAGS_HMAC		21
129 #define FLAGS_ERROR		22
130 
131 #define OP_UPDATE		1
132 #define OP_FINAL		2
133 
134 #define OMAP_ALIGN_MASK		(sizeof(u32)-1)
135 #define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
136 
137 #define BUFLEN			SHA512_BLOCK_SIZE
138 #define OMAP_SHA_DMA_THRESHOLD	256
139 
140 #define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
141 
142 struct omap_sham_dev;
143 
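/*
 * Per-request hash state. @digest and @buffer are 32-bit aligned so
 * their contents can be moved to/from the IDIGEST/DIN registers
 * directly. The walk state tracks progress through the source
 * scatterlist; @buffer carries partial-block data between update()
 * calls.
 */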
144 struct omap_sham_reqctx {
145 	struct omap_sham_dev	*dd;
146 	unsigned long		flags;
147 	unsigned long		op;
148 
149 	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
150 	size_t			digcnt;
151 	size_t			bufcnt;
152 	size_t			buflen;
153 
154 	/* walk state */
155 	struct scatterlist	*sg;
156 	struct scatterlist	sgl[2];
157 	int			offset;	/* offset in current sg */
158 	int			sg_len;
159 	unsigned int		total;	/* total request */
160 
161 	u8			buffer[] OMAP_ALIGNED;
162 };
163 
164 struct omap_sham_hmac_ctx {
165 	struct crypto_shash	*shash;
166 	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
167 	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
168 };
169 
170 struct omap_sham_ctx {
171 	unsigned long		flags;
172 
173 	/* fallback stuff */
174 	struct crypto_shash	*fallback;
175 
176 	struct omap_sham_hmac_ctx base[];
177 };
178 
179 #define OMAP_SHAM_QUEUE_LENGTH	10
180 
181 struct omap_sham_algs_info {
182 	struct ahash_alg	*algs_list;
183 	unsigned int		size;
184 	unsigned int		registered;
185 };
186 
187 struct omap_sham_pdata {
188 	struct omap_sham_algs_info	*algs_info;
189 	unsigned int	algs_info_size;
190 	unsigned long	flags;
191 	int		digest_size;
192 
193 	void		(*copy_hash)(struct ahash_request *req, int out);
194 	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
195 				      int final, int dma);
196 	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
197 	int		(*poll_irq)(struct omap_sham_dev *dd);
198 	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
199 
200 	u32		odigest_ofs;
201 	u32		idigest_ofs;
202 	u32		din_ofs;
203 	u32		digcnt_ofs;
204 	u32		rev_ofs;
205 	u32		mask_ofs;
206 	u32		sysstatus_ofs;
207 	u32		mode_ofs;
208 	u32		length_ofs;
209 
210 	u32		major_mask;
211 	u32		major_shift;
212 	u32		minor_mask;
213 	u32		minor_shift;
214 };
215 
216 struct omap_sham_dev {
217 	struct list_head	list;
218 	unsigned long		phys_base;
219 	struct device		*dev;
220 	void __iomem		*io_base;
221 	int			irq;
222 	spinlock_t		lock;
223 	int			err;
224 	struct dma_chan		*dma_lch;
225 	struct tasklet_struct	done_task;
226 	u8			polling_mode;
227 	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
228 
229 	unsigned long		flags;
230 	int			fallback_sz;
231 	struct crypto_queue	queue;
232 	struct ahash_request	*req;
233 
234 	const struct omap_sham_pdata	*pdata;
235 };
236 
237 struct omap_sham_drv {
238 	struct list_head	dev_list;
239 	spinlock_t		lock;
240 	unsigned long		flags;
241 };
242 
243 static struct omap_sham_drv sham = {
244 	.dev_list = LIST_HEAD_INIT(sham.dev_list),
245 	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
246 };
247 
248 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
249 {
250 	return __raw_readl(dd->io_base + offset);
251 }
252 
253 static inline void omap_sham_write(struct omap_sham_dev *dd,
254 					u32 offset, u32 value)
255 {
256 	__raw_writel(value, dd->io_base + offset);
257 }
258 
259 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
260 					u32 value, u32 mask)
261 {
262 	u32 val;
263 
264 	val = omap_sham_read(dd, address);
265 	val &= ~mask;
266 	val |= value;
267 	omap_sham_write(dd, address, val);
268 }
269 
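/*
 * Busy-wait until @bit is set in the register at @offset, bounded by a
 * one second (DEFAULT_TIMEOUT_INTERVAL) timeout.
 */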
270 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
271 {
272 	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
273 
274 	while (!(omap_sham_read(dd, offset) & bit)) {
275 		if (time_is_before_jiffies(timeout))
276 			return -ETIMEDOUT;
277 	}
278 
279 	return 0;
280 }
281 
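/*
 * Save (out != 0) or restore (out == 0) the intermediate digest via the
 * IDIGEST registers, used to context-switch the accelerator between
 * requests. The OMAP4 variant below additionally handles the outer
 * HMAC digest held in the ODIGEST registers.
 */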
282 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
283 {
284 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
285 	struct omap_sham_dev *dd = ctx->dd;
286 	u32 *hash = (u32 *)ctx->digest;
287 	int i;
288 
289 	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
290 		if (out)
291 			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
292 		else
293 			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
294 	}
295 }
296 
297 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
298 {
299 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
300 	struct omap_sham_dev *dd = ctx->dd;
301 	int i;
302 
303 	if (ctx->flags & BIT(FLAGS_HMAC)) {
304 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
305 		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
306 		struct omap_sham_hmac_ctx *bctx = tctx->base;
307 		u32 *opad = (u32 *)bctx->opad;
308 
309 		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
310 			if (out)
311 				opad[i] = omap_sham_read(dd,
312 						SHA_REG_ODIGEST(dd, i));
313 			else
314 				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
315 						opad[i]);
316 		}
317 	}
318 
319 	omap_sham_copy_hash_omap2(req, out);
320 }
321 
322 static void omap_sham_copy_ready_hash(struct ahash_request *req)
323 {
324 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
325 	u32 *in = (u32 *)ctx->digest;
326 	u32 *hash = (u32 *)req->result;
327 	int i, d, big_endian = 0;
328 
329 	if (!hash)
330 		return;
331 
332 	switch (ctx->flags & FLAGS_MODE_MASK) {
333 	case FLAGS_MODE_MD5:
334 		d = MD5_DIGEST_SIZE / sizeof(u32);
335 		break;
336 	case FLAGS_MODE_SHA1:
337 		/* OMAP2 SHA1 is big endian */
338 		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
339 			big_endian = 1;
340 		d = SHA1_DIGEST_SIZE / sizeof(u32);
341 		break;
342 	case FLAGS_MODE_SHA224:
343 		d = SHA224_DIGEST_SIZE / sizeof(u32);
344 		break;
345 	case FLAGS_MODE_SHA256:
346 		d = SHA256_DIGEST_SIZE / sizeof(u32);
347 		break;
348 	case FLAGS_MODE_SHA384:
349 		d = SHA384_DIGEST_SIZE / sizeof(u32);
350 		break;
351 	case FLAGS_MODE_SHA512:
352 		d = SHA512_DIGEST_SIZE / sizeof(u32);
353 		break;
354 	default:
355 		d = 0;
356 	}
357 
358 	if (big_endian)
359 		for (i = 0; i < d; i++)
360 			hash[i] = be32_to_cpu(in[i]);
361 	else
362 		for (i = 0; i < d; i++)
363 			hash[i] = le32_to_cpu(in[i]);
364 }
365 
366 static int omap_sham_hw_init(struct omap_sham_dev *dd)
367 {
368 	int err;
369 
370 	err = pm_runtime_get_sync(dd->dev);
371 	if (err < 0) {
372 		dev_err(dd->dev, "failed to get sync: %d\n", err);
373 		return err;
374 	}
375 
376 	if (!test_bit(FLAGS_INIT, &dd->flags)) {
377 		set_bit(FLAGS_INIT, &dd->flags);
378 		dd->err = 0;
379 	}
380 
381 	return 0;
382 }
383 
384 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
385 				 int final, int dma)
386 {
387 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
388 	u32 val = length << 5, mask;
389 
390 	if (likely(ctx->digcnt))
391 		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
392 
393 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
394 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
395 		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
396 	/*
397 	 * Set ALGO_CONST only for the first iteration and CLOSE_HASH
398 	 * only for the last one.
399 	 */
400 	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
401 		val |= SHA_REG_CTRL_ALGO;
402 	if (!ctx->digcnt)
403 		val |= SHA_REG_CTRL_ALGO_CONST;
404 	if (final)
405 		val |= SHA_REG_CTRL_CLOSE_HASH;
406 
407 	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
408 			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
409 
410 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
411 }
412 
413 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
414 {
415 }
416 
417 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
418 {
419 	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
420 }
421 
422 static int get_block_size(struct omap_sham_reqctx *ctx)
423 {
424 	int d;
425 
426 	switch (ctx->flags & FLAGS_MODE_MASK) {
427 	case FLAGS_MODE_MD5:
428 	case FLAGS_MODE_SHA1:
429 		d = SHA1_BLOCK_SIZE;
430 		break;
431 	case FLAGS_MODE_SHA224:
432 	case FLAGS_MODE_SHA256:
433 		d = SHA256_BLOCK_SIZE;
434 		break;
435 	case FLAGS_MODE_SHA384:
436 	case FLAGS_MODE_SHA512:
437 		d = SHA512_BLOCK_SIZE;
438 		break;
439 	default:
440 		d = 0;
441 	}
442 
443 	return d;
444 }
445 
446 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
447 				    u32 *value, int count)
448 {
449 	for (; count--; value++, offset += 4)
450 		omap_sham_write(dd, offset, *value);
451 }
452 
453 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
454 				 int final, int dma)
455 {
456 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
457 	u32 val, mask;
458 
459 	/*
460 	 * Setting ALGO_CONST only for the first iteration and
461 	 * CLOSE_HASH only for the last one. Note that flags mode bits
462 	 * correspond to algorithm encoding in mode register.
463 	 */
464 	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
465 	if (!ctx->digcnt) {
466 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
467 		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
468 		struct omap_sham_hmac_ctx *bctx = tctx->base;
469 		int bs, nr_dr;
470 
471 		val |= SHA_REG_MODE_ALGO_CONSTANT;
472 
473 		if (ctx->flags & BIT(FLAGS_HMAC)) {
474 			bs = get_block_size(ctx);
475 			nr_dr = bs / (2 * sizeof(u32));
476 			val |= SHA_REG_MODE_HMAC_KEY_PROC;
477 			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
478 					  (u32 *)bctx->ipad, nr_dr);
479 			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
480 					  (u32 *)bctx->ipad + nr_dr, nr_dr);
481 			ctx->digcnt += bs;
482 		}
483 	}
484 
485 	if (final) {
486 		val |= SHA_REG_MODE_CLOSE_HASH;
487 
488 		if (ctx->flags & BIT(FLAGS_HMAC))
489 			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
490 	}
491 
492 	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
493 	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
494 	       SHA_REG_MODE_HMAC_KEY_PROC;
495 
496 	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
497 	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
498 	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
499 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
500 			     SHA_REG_MASK_IT_EN |
501 				     (dma ? SHA_REG_MASK_DMA_EN : 0),
502 			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
503 }
504 
505 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
506 {
507 	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
508 }
509 
510 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
511 {
512 	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
513 			      SHA_REG_IRQSTATUS_INPUT_RDY);
514 }
515 
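/*
 * PIO transmit path: program the control registers, then feed the data
 * one block at a time into the DIN registers through an atomic
 * scatterlist mapping iterator, polling for INPUT_READY before each
 * block. Completion is signalled by interrupt, hence -EINPROGRESS.
 */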
516 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
517 			      int final)
518 {
519 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
520 	int count, len32, bs32, offset = 0;
521 	const u32 *buffer;
522 	int mlen;
523 	struct sg_mapping_iter mi;
524 
525 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
526 						ctx->digcnt, length, final);
527 
528 	dd->pdata->write_ctrl(dd, length, final, 0);
529 	dd->pdata->trigger(dd, length);
530 
531 	/* update digcnt before the transfer, so clocks can be disabled later */
532 	ctx->digcnt += length;
533 	ctx->total -= length;
534 
535 	if (final)
536 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
537 
538 	set_bit(FLAGS_CPU, &dd->flags);
539 
540 	len32 = DIV_ROUND_UP(length, sizeof(u32));
541 	bs32 = get_block_size(ctx) / sizeof(u32);
542 
543 	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
544 		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
545 
546 	mlen = 0;
547 
548 	while (len32) {
549 		if (dd->pdata->poll_irq(dd))
550 			return -ETIMEDOUT;
551 
552 		for (count = 0; count < min(len32, bs32); count++, offset++) {
553 			if (!mlen) {
554 				sg_miter_next(&mi);
555 				mlen = mi.length;
556 				if (!mlen) {
557 					pr_err("sg miter failure.\n");
558 					return -EINVAL;
559 				}
560 				offset = 0;
561 				buffer = mi.addr;
562 			}
563 			omap_sham_write(dd, SHA_REG_DIN(dd, count),
564 					buffer[offset]);
565 			mlen -= 4;
566 		}
567 		len32 -= min(len32, bs32);
568 	}
569 
570 	sg_miter_stop(&mi);
571 
572 	return -EINPROGRESS;
573 }
574 
575 static void omap_sham_dma_callback(void *param)
576 {
577 	struct omap_sham_dev *dd = param;
578 
579 	set_bit(FLAGS_DMA_READY, &dd->flags);
580 	tasklet_schedule(&dd->done_task);
581 }
582 
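/*
 * DMA transmit path: map the scatterlist, configure a 32-bit wide
 * slave transfer that bursts one hash block into the DIN register, and
 * let the completion callback schedule the done tasklet.
 */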
583 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
584 			      int final)
585 {
586 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
587 	struct dma_async_tx_descriptor *tx;
588 	struct dma_slave_config cfg;
589 	int ret;
590 
591 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
592 						ctx->digcnt, length, final);
593 
594 	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
595 		dev_err(dd->dev, "dma_map_sg error\n");
596 		return -EINVAL;
597 	}
598 
599 	memset(&cfg, 0, sizeof(cfg));
600 
601 	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
602 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
603 	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
604 
605 	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
606 	if (ret) {
607 		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
608 		return ret;
609 	}
610 
611 	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
612 				     DMA_MEM_TO_DEV,
613 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
614 
615 	if (!tx) {
616 		dev_err(dd->dev, "prep_slave_sg failed\n");
617 		return -EINVAL;
618 	}
619 
620 	tx->callback = omap_sham_dma_callback;
621 	tx->callback_param = dd;
622 
623 	dd->pdata->write_ctrl(dd, length, final, 1);
624 
625 	ctx->digcnt += length;
626 	ctx->total -= length;
627 
628 	if (final)
629 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
630 
631 	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
632 
633 	dmaengine_submit(tx);
634 	dma_async_issue_pending(dd->dma_lch);
635 
636 	dd->pdata->trigger(dd, length);
637 
638 	return -EINPROGRESS;
639 }
640 
641 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
642 				   struct scatterlist *sg, int bs, int new_len)
643 {
644 	int n = sg_nents(sg);
645 	struct scatterlist *tmp;
646 	int offset = ctx->offset;
647 
648 	ctx->total = new_len;
649 
650 	if (ctx->bufcnt)
651 		n++;
652 
653 	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
654 	if (!ctx->sg)
655 		return -ENOMEM;
656 
657 	sg_init_table(ctx->sg, n);
658 
659 	tmp = ctx->sg;
660 
661 	ctx->sg_len = 0;
662 
663 	if (ctx->bufcnt) {
664 		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
665 		tmp = sg_next(tmp);
666 		ctx->sg_len++;
667 		new_len -= ctx->bufcnt;
668 	}
669 
670 	while (sg && new_len) {
671 		int len = sg->length - offset;
672 
673 		if (len <= 0) {
674 			offset -= sg->length;
675 			sg = sg_next(sg);
676 			continue;
677 		}
678 
679 		if (new_len < len)
680 			len = new_len;
681 
682 		if (len > 0) {
683 			new_len -= len;
684 			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
685 			offset = 0;
686 			ctx->offset = 0;
687 			ctx->sg_len++;
688 			if (new_len <= 0)
689 				break;
690 			tmp = sg_next(tmp);
691 		}
692 
693 		sg = sg_next(sg);
694 	}
695 
696 	if (tmp)
697 		sg_mark_end(tmp);
698 
699 	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
700 
701 	ctx->offset += new_len - ctx->bufcnt;
702 	ctx->bufcnt = 0;
703 
704 	return 0;
705 }
706 
707 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
708 			      struct scatterlist *sg, int bs,
709 			      unsigned int new_len)
710 {
711 	int pages;
712 	void *buf;
713 
714 	pages = get_order(new_len);
715 
716 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
717 	if (!buf) {
718 		pr_err("Couldn't allocate pages for unaligned cases.\n");
719 		return -ENOMEM;
720 	}
721 
722 	if (ctx->bufcnt)
723 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
724 
725 	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
726 				 min(new_len, ctx->total) - ctx->bufcnt, 0);
727 	sg_init_table(ctx->sgl, 1);
728 	sg_set_buf(ctx->sgl, buf, new_len);
729 	ctx->sg = ctx->sgl;
730 	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
731 	ctx->sg_len = 1;
732 	ctx->offset += new_len - ctx->bufcnt;
733 	ctx->bufcnt = 0;
734 	ctx->total = new_len;
735 
736 	return 0;
737 }
738 
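/*
 * Decide whether the source scatterlist can be fed to the DMA engine
 * as-is: every chunk must be 32-bit aligned and a block-size multiple,
 * and a transfer is capped at OMAP_SHA_MAX_DMA_LEN. Misaligned data is
 * bounced through a contiguous buffer (omap_sham_copy_sgs()); aligned
 * but awkwardly split data gets a rebuilt list
 * (omap_sham_copy_sg_lists()).
 */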
739 static int omap_sham_align_sgs(struct scatterlist *sg,
740 			       int nbytes, int bs, bool final,
741 			       struct omap_sham_reqctx *rctx)
742 {
743 	int n = 0;
744 	bool aligned = true;
745 	bool list_ok = true;
746 	struct scatterlist *sg_tmp = sg;
747 	int new_len;
748 	int offset = rctx->offset;
749 	int bufcnt = rctx->bufcnt;
750 
751 	if (!sg || !sg->length || !nbytes) {
752 		if (bufcnt) {
753 			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
754 			sg_init_table(rctx->sgl, 1);
755 			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
756 			rctx->sg = rctx->sgl;
757 			rctx->sg_len = 1;
758 		}
759 
760 		return 0;
761 	}
762 
763 	new_len = nbytes;
764 
765 	if (offset)
766 		list_ok = false;
767 
768 	if (final)
769 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
770 	else
771 		new_len = (new_len - 1) / bs * bs;
772 
773 	if (!new_len)
774 		return 0;
775 
776 	if (nbytes != new_len)
777 		list_ok = false;
778 
779 	while (nbytes > 0 && sg_tmp) {
780 		n++;
781 
782 		if (bufcnt) {
783 			if (!IS_ALIGNED(bufcnt, bs)) {
784 				aligned = false;
785 				break;
786 			}
787 			nbytes -= bufcnt;
788 			bufcnt = 0;
789 			if (!nbytes)
790 				list_ok = false;
791 
792 			continue;
793 		}
794 
795 #ifdef CONFIG_ZONE_DMA
796 		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
797 			aligned = false;
798 			break;
799 		}
800 #endif
801 
802 		if (offset < sg_tmp->length) {
803 			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
804 				aligned = false;
805 				break;
806 			}
807 
808 			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
809 				aligned = false;
810 				break;
811 			}
812 		}
813 
814 		if (offset) {
815 			offset -= sg_tmp->length;
816 			if (offset < 0) {
817 				nbytes += offset;
818 				offset = 0;
819 			}
820 		} else {
821 			nbytes -= sg_tmp->length;
822 		}
823 
824 		sg_tmp = sg_next(sg_tmp);
825 
826 		if (nbytes < 0) {
827 			list_ok = false;
828 			break;
829 		}
830 	}
831 
832 	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
833 		new_len = OMAP_SHA_MAX_DMA_LEN;
834 		aligned = false;
835 	}
836 
837 	if (!aligned)
838 		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
839 	else if (!list_ok)
840 		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
841 
842 	rctx->total = new_len;
843 	rctx->offset += new_len;
844 	rctx->sg_len = n;
845 	if (rctx->bufcnt) {
846 		sg_init_table(rctx->sgl, 2);
847 		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
848 		sg_chain(rctx->sgl, 2, sg);
849 		rctx->sg = rctx->sgl;
850 	} else {
851 		rctx->sg = sg;
852 	}
853 
854 	return 0;
855 }
856 
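/*
 * Work out how many bytes can be processed in this pass (block-size
 * multiples, rounded up only when finalizing), stash the leftover tail
 * in rctx->buffer for the next update, and set FLAGS_HUGE when the
 * request has to be split across several hardware passes.
 */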
857 static int omap_sham_prepare_request(struct ahash_request *req, bool update)
858 {
859 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
860 	int bs;
861 	int ret;
862 	unsigned int nbytes;
863 	bool final = rctx->flags & BIT(FLAGS_FINUP);
864 	int hash_later;
865 
866 	bs = get_block_size(rctx);
867 
868 	nbytes = rctx->bufcnt;
869 
870 	if (update)
871 		nbytes += req->nbytes - rctx->offset;
872 
873 	dev_dbg(rctx->dd->dev,
874 		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
875 		__func__, nbytes, bs, rctx->total, rctx->offset,
876 		rctx->bufcnt);
877 
878 	if (!nbytes)
879 		return 0;
880 
881 	rctx->total = nbytes;
882 
883 	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
884 		int len = bs - rctx->bufcnt % bs;
885 
886 		if (len > req->nbytes)
887 			len = req->nbytes;
888 		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
889 					 0, len, 0);
890 		rctx->bufcnt += len;
891 		rctx->offset = len;
892 	}
893 
894 	if (rctx->bufcnt)
895 		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
896 
897 	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
898 	if (ret)
899 		return ret;
900 
901 	hash_later = nbytes - rctx->total;
902 	if (hash_later < 0)
903 		hash_later = 0;
904 
905 	if (hash_later && hash_later <= rctx->buflen) {
906 		scatterwalk_map_and_copy(rctx->buffer,
907 					 req->src,
908 					 req->nbytes - hash_later,
909 					 hash_later, 0);
910 
911 		rctx->bufcnt = hash_later;
912 	} else {
913 		rctx->bufcnt = 0;
914 	}
915 
916 	if (hash_later > rctx->buflen)
917 		set_bit(FLAGS_HUGE, &rctx->dd->flags);
918 
919 	rctx->total = min(nbytes, rctx->total);
920 
921 	return 0;
922 }
923 
924 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
925 {
926 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
927 
928 	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
929 
930 	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
931 
932 	return 0;
933 }
934 
935 static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
936 {
937 	struct omap_sham_dev *dd;
938 
939 	if (ctx->dd)
940 		return ctx->dd;
941 
942 	spin_lock_bh(&sham.lock);
943 	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
944 	list_move_tail(&dd->list, &sham.dev_list);
945 	ctx->dd = dd;
946 	spin_unlock_bh(&sham.lock);
947 
948 	return dd;
949 }
950 
951 static int omap_sham_init(struct ahash_request *req)
952 {
953 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
954 	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
955 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
956 	struct omap_sham_dev *dd;
957 	int bs = 0;
958 
959 	ctx->dd = NULL;
960 
961 	dd = omap_sham_find_dev(ctx);
962 	if (!dd)
963 		return -ENODEV;
964 
965 	ctx->flags = 0;
966 
967 	dev_dbg(dd->dev, "init: digest size: %d\n",
968 		crypto_ahash_digestsize(tfm));
969 
970 	switch (crypto_ahash_digestsize(tfm)) {
971 	case MD5_DIGEST_SIZE:
972 		ctx->flags |= FLAGS_MODE_MD5;
973 		bs = SHA1_BLOCK_SIZE;
974 		break;
975 	case SHA1_DIGEST_SIZE:
976 		ctx->flags |= FLAGS_MODE_SHA1;
977 		bs = SHA1_BLOCK_SIZE;
978 		break;
979 	case SHA224_DIGEST_SIZE:
980 		ctx->flags |= FLAGS_MODE_SHA224;
981 		bs = SHA224_BLOCK_SIZE;
982 		break;
983 	case SHA256_DIGEST_SIZE:
984 		ctx->flags |= FLAGS_MODE_SHA256;
985 		bs = SHA256_BLOCK_SIZE;
986 		break;
987 	case SHA384_DIGEST_SIZE:
988 		ctx->flags |= FLAGS_MODE_SHA384;
989 		bs = SHA384_BLOCK_SIZE;
990 		break;
991 	case SHA512_DIGEST_SIZE:
992 		ctx->flags |= FLAGS_MODE_SHA512;
993 		bs = SHA512_BLOCK_SIZE;
994 		break;
995 	}
996 
997 	ctx->bufcnt = 0;
998 	ctx->digcnt = 0;
999 	ctx->total = 0;
1000 	ctx->offset = 0;
1001 	ctx->buflen = BUFLEN;
1002 
1003 	if (tctx->flags & BIT(FLAGS_HMAC)) {
1004 		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1005 			struct omap_sham_hmac_ctx *bctx = tctx->base;
1006 
1007 			memcpy(ctx->buffer, bctx->ipad, bs);
1008 			ctx->bufcnt = bs;
1009 		}
1010 
1011 		ctx->flags |= BIT(FLAGS_HMAC);
1012 	}
1013 
1014 	return 0;
1015 
1016 }
1017 
1018 static int omap_sham_update_req(struct omap_sham_dev *dd)
1019 {
1020 	struct ahash_request *req = dd->req;
1021 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1022 	int err;
1023 	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1024 			!(dd->flags & BIT(FLAGS_HUGE));
1025 
1026 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
1027 		ctx->total, ctx->digcnt, final);
1028 
1029 	if (ctx->total < get_block_size(ctx) ||
1030 	    ctx->total < dd->fallback_sz)
1031 		ctx->flags |= BIT(FLAGS_CPU);
1032 
1033 	if (ctx->flags & BIT(FLAGS_CPU))
1034 		err = omap_sham_xmit_cpu(dd, ctx->total, final);
1035 	else
1036 		err = omap_sham_xmit_dma(dd, ctx->total, final);
1037 
1038 	/* wait for dma completion before we can take more data */
1039 	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1040 
1041 	return err;
1042 }
1043 
1044 static int omap_sham_final_req(struct omap_sham_dev *dd)
1045 {
1046 	struct ahash_request *req = dd->req;
1047 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1048 	int err = 0, use_dma = 1;
1049 
1050 	if (dd->flags & BIT(FLAGS_HUGE))
1051 		return 0;
1052 
1053 	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1054 		/*
1055 		 * It is faster to handle the last block with the CPU; also
1056 		 * use the CPU when no DMA channel is available (polling mode).
1057 		 */
1058 		use_dma = 0;
1059 
1060 	if (use_dma)
1061 		err = omap_sham_xmit_dma(dd, ctx->total, 1);
1062 	else
1063 		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1064 
1065 	ctx->bufcnt = 0;
1066 
1067 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
1068 
1069 	return err;
1070 }
1071 
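/*
 * Finish HMAC in software for hardware without FLAGS_AUTO_XOR: hash
 * opad followed by the inner digest (already in req->result) using the
 * synchronous fallback transform.
 */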
1072 static int omap_sham_finish_hmac(struct ahash_request *req)
1073 {
1074 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1075 	struct omap_sham_hmac_ctx *bctx = tctx->base;
1076 	int bs = crypto_shash_blocksize(bctx->shash);
1077 	int ds = crypto_shash_digestsize(bctx->shash);
1078 	SHASH_DESC_ON_STACK(shash, bctx->shash);
1079 
1080 	shash->tfm = bctx->shash;
1081 
1082 	return crypto_shash_init(shash) ?:
1083 	       crypto_shash_update(shash, bctx->opad, bs) ?:
1084 	       crypto_shash_finup(shash, req->result, ds, req->result);
1085 }
1086 
1087 static int omap_sham_finish(struct ahash_request *req)
1088 {
1089 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1090 	struct omap_sham_dev *dd = ctx->dd;
1091 	int err = 0;
1092 
1093 	if (ctx->digcnt) {
1094 		omap_sham_copy_ready_hash(req);
1095 		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1096 				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
1097 			err = omap_sham_finish_hmac(req);
1098 	}
1099 
1100 	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
1101 
1102 	return err;
1103 }
1104 
1105 static void omap_sham_finish_req(struct ahash_request *req, int err)
1106 {
1107 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1108 	struct omap_sham_dev *dd = ctx->dd;
1109 
1110 	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1111 		free_pages((unsigned long)sg_virt(ctx->sg),
1112 			   get_order(ctx->sg->length));
1113 
1114 	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1115 		kfree(ctx->sg);
1116 
1117 	ctx->sg = NULL;
1118 
1119 	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
1120 
1121 	if (dd->flags & BIT(FLAGS_HUGE)) {
1122 		dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1123 				BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
1124 		omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1125 		if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1126 			err = omap_sham_update_req(dd);
1127 			if (err != -EINPROGRESS &&
1128 			    (ctx->flags & BIT(FLAGS_FINUP)))
1129 				err = omap_sham_final_req(dd);
1130 		} else if (ctx->op == OP_FINAL) {
1131 			omap_sham_final_req(dd);
1132 		}
1133 		return;
1134 	}
1135 
1136 	if (!err) {
1137 		dd->pdata->copy_hash(req, 1);
1138 		if (test_bit(FLAGS_FINAL, &dd->flags))
1139 			err = omap_sham_finish(req);
1140 	} else {
1141 		ctx->flags |= BIT(FLAGS_ERROR);
1142 	}
1143 
1144 	/* atomic operation is not needed here */
1145 	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1146 			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1147 
1148 	pm_runtime_mark_last_busy(dd->dev);
1149 	pm_runtime_put_autosuspend(dd->dev);
1150 
1151 	ctx->offset = 0;
1152 
1153 	if (req->base.complete)
1154 		req->base.complete(&req->base, err);
1155 }
1156 
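/*
 * Central dispatcher: enqueue @req (if any) and, when the device is
 * idle, pull the next request off the queue, mark the device busy,
 * restore the hash context if needed and start a CPU or DMA transfer.
 * Synchronously completed requests loop back via the retry label so
 * the queue drains without recursion.
 */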
1157 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1158 				  struct ahash_request *req)
1159 {
1160 	struct crypto_async_request *async_req, *backlog;
1161 	struct omap_sham_reqctx *ctx;
1162 	unsigned long flags;
1163 	int err = 0, ret = 0;
1164 
1165 retry:
1166 	spin_lock_irqsave(&dd->lock, flags);
1167 	if (req)
1168 		ret = ahash_enqueue_request(&dd->queue, req);
1169 	if (test_bit(FLAGS_BUSY, &dd->flags)) {
1170 		spin_unlock_irqrestore(&dd->lock, flags);
1171 		return ret;
1172 	}
1173 	backlog = crypto_get_backlog(&dd->queue);
1174 	async_req = crypto_dequeue_request(&dd->queue);
1175 	if (async_req)
1176 		set_bit(FLAGS_BUSY, &dd->flags);
1177 	spin_unlock_irqrestore(&dd->lock, flags);
1178 
1179 	if (!async_req)
1180 		return ret;
1181 
1182 	if (backlog)
1183 		backlog->complete(backlog, -EINPROGRESS);
1184 
1185 	req = ahash_request_cast(async_req);
1186 	dd->req = req;
1187 	ctx = ahash_request_ctx(req);
1188 
1189 	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1190 	if (err || !ctx->total)
1191 		goto err1;
1192 
1193 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1194 						ctx->op, req->nbytes);
1195 
1196 	err = omap_sham_hw_init(dd);
1197 	if (err)
1198 		goto err1;
1199 
1200 	if (ctx->digcnt)
1201 		/* request has changed - restore hash */
1202 		dd->pdata->copy_hash(req, 0);
1203 
1204 	if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1205 		err = omap_sham_update_req(dd);
1206 		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
1207 			/* no final() after finup() */
1208 			err = omap_sham_final_req(dd);
1209 	} else if (ctx->op == OP_FINAL) {
1210 		err = omap_sham_final_req(dd);
1211 	}
1212 err1:
1213 	dev_dbg(dd->dev, "exit, err: %d\n", err);
1214 
1215 	if (err != -EINPROGRESS) {
1216 		/* done_task will not finish it, so do it here */
1217 		omap_sham_finish_req(req, err);
1218 		req = NULL;
1219 
1220 		/*
1221 		 * Execute next request immediately if there is anything
1222 		 * in queue.
1223 		 */
1224 		goto retry;
1225 	}
1226 
1227 	return ret;
1228 }
1229 
1230 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1231 {
1232 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1233 	struct omap_sham_dev *dd = ctx->dd;
1234 
1235 	ctx->op = op;
1236 
1237 	return omap_sham_handle_queue(dd, req);
1238 }
1239 
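/*
 * Updates that fit in the request buffer are only copied there; the
 * hardware gets involved (via the queue) once the buffer would
 * overflow, or at finalization time.
 */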
1240 static int omap_sham_update(struct ahash_request *req)
1241 {
1242 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1243 	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1244 
1245 	if (!req->nbytes)
1246 		return 0;
1247 
1248 	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1249 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1250 					 0, req->nbytes, 0);
1251 		ctx->bufcnt += req->nbytes;
1252 		return 0;
1253 	}
1254 
1255 	if (dd->polling_mode)
1256 		ctx->flags |= BIT(FLAGS_CPU);
1257 
1258 	return omap_sham_enqueue(req, OP_UPDATE);
1259 }
1260 
1261 static int omap_sham_final_shash(struct ahash_request *req)
1262 {
1263 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1264 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1265 	int offset = 0;
1266 
1267 	/*
1268 	 * If we are running HMAC on limited hardware support, skip
1269 	 * the ipad in the beginning of the buffer if we are going for
1270 	 * software fallback algorithm.
1271 	 */
1272 	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1273 	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1274 		offset = get_block_size(ctx);
1275 
1276 	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1277 				       ctx->bufcnt - offset, req->result);
1278 }
1279 
1280 static int omap_sham_final(struct ahash_request *req)
1281 {
1282 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1283 
1284 	ctx->flags |= BIT(FLAGS_FINUP);
1285 
1286 	if (ctx->flags & BIT(FLAGS_ERROR))
1287 		return 0; /* uncompleted hash is not needed */
1288 
1289 	/*
1290 	 * OMAP HW accel works only with buffers >= 9 bytes.
1291 	 * HMAC is always >= 9 because the ipad is a full block.
1292 	 * If the buffered size is less than fallback_sz, use the SW
1293 	 * fallback hash instead, as DMA + HW offload provides no
1294 	 * benefit for such small inputs.
1295 	 */
1296 	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1297 		return omap_sham_final_shash(req);
1298 	else if (ctx->bufcnt)
1299 		return omap_sham_enqueue(req, OP_FINAL);
1300 
1301 	/* copy ready hash (+ finalize hmac) */
1302 	return omap_sham_finish(req);
1303 }
1304 
1305 static int omap_sham_finup(struct ahash_request *req)
1306 {
1307 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1308 	int err1, err2;
1309 
1310 	ctx->flags |= BIT(FLAGS_FINUP);
1311 
1312 	err1 = omap_sham_update(req);
1313 	if (err1 == -EINPROGRESS || err1 == -EBUSY)
1314 		return err1;
1315 	/*
1316 	 * final() always has to be called to clean up resources,
1317 	 * even if update() failed, except for -EINPROGRESS
1318 	 */
1319 	err2 = omap_sham_final(req);
1320 
1321 	return err1 ?: err2;
1322 }
1323 
1324 static int omap_sham_digest(struct ahash_request *req)
1325 {
1326 	return omap_sham_init(req) ?: omap_sham_finup(req);
1327 }
1328 
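/*
 * HMAC key setup: keys longer than the block size are digested first,
 * then the key is zero-padded to a full block. Hardware with
 * FLAGS_AUTO_XOR applies the ipad/opad XOR itself, so only the padded
 * key is kept; otherwise both pads are precomputed here for the
 * software-assisted path.
 */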
1329 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1330 		      unsigned int keylen)
1331 {
1332 	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1333 	struct omap_sham_hmac_ctx *bctx = tctx->base;
1334 	int bs = crypto_shash_blocksize(bctx->shash);
1335 	int ds = crypto_shash_digestsize(bctx->shash);
1336 	int err, i;
1337 
1338 	err = crypto_shash_setkey(tctx->fallback, key, keylen);
1339 	if (err)
1340 		return err;
1341 
1342 	if (keylen > bs) {
1343 		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1344 					      bctx->ipad);
1345 		if (err)
1346 			return err;
1347 		keylen = ds;
1348 	} else {
1349 		memcpy(bctx->ipad, key, keylen);
1350 	}
1351 
1352 	memset(bctx->ipad + keylen, 0, bs - keylen);
1353 
1354 	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1355 		memcpy(bctx->opad, bctx->ipad, bs);
1356 
1357 		for (i = 0; i < bs; i++) {
1358 			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1359 			bctx->opad[i] ^= HMAC_OPAD_VALUE;
1360 		}
1361 	}
1362 
1363 	return err;
1364 }
1365 
1366 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1367 {
1368 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1369 	const char *alg_name = crypto_tfm_alg_name(tfm);
1370 
1371 	/* Allocate a fallback and abort if it failed. */
1372 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
1373 					    CRYPTO_ALG_NEED_FALLBACK);
1374 	if (IS_ERR(tctx->fallback)) {
1375 		pr_err("omap-sham: fallback driver '%s' "
1376 				"could not be loaded.\n", alg_name);
1377 		return PTR_ERR(tctx->fallback);
1378 	}
1379 
1380 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1381 				 sizeof(struct omap_sham_reqctx) + BUFLEN);
1382 
1383 	if (alg_base) {
1384 		struct omap_sham_hmac_ctx *bctx = tctx->base;
1385 		tctx->flags |= BIT(FLAGS_HMAC);
1386 		bctx->shash = crypto_alloc_shash(alg_base, 0,
1387 						CRYPTO_ALG_NEED_FALLBACK);
1388 		if (IS_ERR(bctx->shash)) {
1389 			pr_err("omap-sham: base driver '%s' "
1390 					"could not be loaded.\n", alg_base);
1391 			crypto_free_shash(tctx->fallback);
1392 			return PTR_ERR(bctx->shash);
1393 		}
1394 
1395 	}
1396 
1397 	return 0;
1398 }
1399 
1400 static int omap_sham_cra_init(struct crypto_tfm *tfm)
1401 {
1402 	return omap_sham_cra_init_alg(tfm, NULL);
1403 }
1404 
1405 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1406 {
1407 	return omap_sham_cra_init_alg(tfm, "sha1");
1408 }
1409 
1410 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1411 {
1412 	return omap_sham_cra_init_alg(tfm, "sha224");
1413 }
1414 
1415 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1416 {
1417 	return omap_sham_cra_init_alg(tfm, "sha256");
1418 }
1419 
1420 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1421 {
1422 	return omap_sham_cra_init_alg(tfm, "md5");
1423 }
1424 
1425 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1426 {
1427 	return omap_sham_cra_init_alg(tfm, "sha384");
1428 }
1429 
1430 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1431 {
1432 	return omap_sham_cra_init_alg(tfm, "sha512");
1433 }
1434 
1435 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1436 {
1437 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1438 
1439 	crypto_free_shash(tctx->fallback);
1440 	tctx->fallback = NULL;
1441 
1442 	if (tctx->flags & BIT(FLAGS_HMAC)) {
1443 		struct omap_sham_hmac_ctx *bctx = tctx->base;
1444 		crypto_free_shash(bctx->shash);
1445 	}
1446 }
1447 
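/*
 * Export/import the partial hash state: the request context itself
 * plus any buffered partial-block bytes. The request size set in
 * omap_sham_cra_init_alg() reserves BUFLEN bytes for this purpose.
 */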
1448 static int omap_sham_export(struct ahash_request *req, void *out)
1449 {
1450 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1451 
1452 	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1453 
1454 	return 0;
1455 }
1456 
1457 static int omap_sham_import(struct ahash_request *req, const void *in)
1458 {
1459 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1460 	const struct omap_sham_reqctx *ctx_in = in;
1461 
1462 	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1463 
1464 	return 0;
1465 }
1466 
1467 static struct ahash_alg algs_sha1_md5[] = {
1468 {
1469 	.init		= omap_sham_init,
1470 	.update		= omap_sham_update,
1471 	.final		= omap_sham_final,
1472 	.finup		= omap_sham_finup,
1473 	.digest		= omap_sham_digest,
1474 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1475 	.halg.base	= {
1476 		.cra_name		= "sha1",
1477 		.cra_driver_name	= "omap-sha1",
1478 		.cra_priority		= 400,
1479 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1480 						CRYPTO_ALG_ASYNC |
1481 						CRYPTO_ALG_NEED_FALLBACK,
1482 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1483 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1484 		.cra_alignmask		= OMAP_ALIGN_MASK,
1485 		.cra_module		= THIS_MODULE,
1486 		.cra_init		= omap_sham_cra_init,
1487 		.cra_exit		= omap_sham_cra_exit,
1488 	}
1489 },
1490 {
1491 	.init		= omap_sham_init,
1492 	.update		= omap_sham_update,
1493 	.final		= omap_sham_final,
1494 	.finup		= omap_sham_finup,
1495 	.digest		= omap_sham_digest,
1496 	.halg.digestsize	= MD5_DIGEST_SIZE,
1497 	.halg.base	= {
1498 		.cra_name		= "md5",
1499 		.cra_driver_name	= "omap-md5",
1500 		.cra_priority		= 400,
1501 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1502 						CRYPTO_ALG_ASYNC |
1503 						CRYPTO_ALG_NEED_FALLBACK,
1504 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1505 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1506 		.cra_alignmask		= OMAP_ALIGN_MASK,
1507 		.cra_module		= THIS_MODULE,
1508 		.cra_init		= omap_sham_cra_init,
1509 		.cra_exit		= omap_sham_cra_exit,
1510 	}
1511 },
1512 {
1513 	.init		= omap_sham_init,
1514 	.update		= omap_sham_update,
1515 	.final		= omap_sham_final,
1516 	.finup		= omap_sham_finup,
1517 	.digest		= omap_sham_digest,
1518 	.setkey		= omap_sham_setkey,
1519 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1520 	.halg.base	= {
1521 		.cra_name		= "hmac(sha1)",
1522 		.cra_driver_name	= "omap-hmac-sha1",
1523 		.cra_priority		= 400,
1524 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1525 						CRYPTO_ALG_ASYNC |
1526 						CRYPTO_ALG_NEED_FALLBACK,
1527 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1528 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1529 					sizeof(struct omap_sham_hmac_ctx),
1530 		.cra_alignmask		= OMAP_ALIGN_MASK,
1531 		.cra_module		= THIS_MODULE,
1532 		.cra_init		= omap_sham_cra_sha1_init,
1533 		.cra_exit		= omap_sham_cra_exit,
1534 	}
1535 },
1536 {
1537 	.init		= omap_sham_init,
1538 	.update		= omap_sham_update,
1539 	.final		= omap_sham_final,
1540 	.finup		= omap_sham_finup,
1541 	.digest		= omap_sham_digest,
1542 	.setkey		= omap_sham_setkey,
1543 	.halg.digestsize	= MD5_DIGEST_SIZE,
1544 	.halg.base	= {
1545 		.cra_name		= "hmac(md5)",
1546 		.cra_driver_name	= "omap-hmac-md5",
1547 		.cra_priority		= 400,
1548 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1549 						CRYPTO_ALG_ASYNC |
1550 						CRYPTO_ALG_NEED_FALLBACK,
1551 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1552 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1553 					sizeof(struct omap_sham_hmac_ctx),
1554 		.cra_alignmask		= OMAP_ALIGN_MASK,
1555 		.cra_module		= THIS_MODULE,
1556 		.cra_init		= omap_sham_cra_md5_init,
1557 		.cra_exit		= omap_sham_cra_exit,
1558 	}
1559 }
1560 };
1561 
1562 /* OMAP4 has some algs in addition to what OMAP2 has */
1563 static struct ahash_alg algs_sha224_sha256[] = {
1564 {
1565 	.init		= omap_sham_init,
1566 	.update		= omap_sham_update,
1567 	.final		= omap_sham_final,
1568 	.finup		= omap_sham_finup,
1569 	.digest		= omap_sham_digest,
1570 	.halg.digestsize	= SHA224_DIGEST_SIZE,
1571 	.halg.base	= {
1572 		.cra_name		= "sha224",
1573 		.cra_driver_name	= "omap-sha224",
1574 		.cra_priority		= 400,
1575 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1576 						CRYPTO_ALG_ASYNC |
1577 						CRYPTO_ALG_NEED_FALLBACK,
1578 		.cra_blocksize		= SHA224_BLOCK_SIZE,
1579 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1580 		.cra_alignmask		= OMAP_ALIGN_MASK,
1581 		.cra_module		= THIS_MODULE,
1582 		.cra_init		= omap_sham_cra_init,
1583 		.cra_exit		= omap_sham_cra_exit,
1584 	}
1585 },
1586 {
1587 	.init		= omap_sham_init,
1588 	.update		= omap_sham_update,
1589 	.final		= omap_sham_final,
1590 	.finup		= omap_sham_finup,
1591 	.digest		= omap_sham_digest,
1592 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1593 	.halg.base	= {
1594 		.cra_name		= "sha256",
1595 		.cra_driver_name	= "omap-sha256",
1596 		.cra_priority		= 400,
1597 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1598 						CRYPTO_ALG_ASYNC |
1599 						CRYPTO_ALG_NEED_FALLBACK,
1600 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1601 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1602 		.cra_alignmask		= OMAP_ALIGN_MASK,
1603 		.cra_module		= THIS_MODULE,
1604 		.cra_init		= omap_sham_cra_init,
1605 		.cra_exit		= omap_sham_cra_exit,
1606 	}
1607 },
1608 {
1609 	.init		= omap_sham_init,
1610 	.update		= omap_sham_update,
1611 	.final		= omap_sham_final,
1612 	.finup		= omap_sham_finup,
1613 	.digest		= omap_sham_digest,
1614 	.setkey		= omap_sham_setkey,
1615 	.halg.digestsize	= SHA224_DIGEST_SIZE,
1616 	.halg.base	= {
1617 		.cra_name		= "hmac(sha224)",
1618 		.cra_driver_name	= "omap-hmac-sha224",
1619 		.cra_priority		= 400,
1620 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1621 						CRYPTO_ALG_ASYNC |
1622 						CRYPTO_ALG_NEED_FALLBACK,
1623 		.cra_blocksize		= SHA224_BLOCK_SIZE,
1624 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1625 					sizeof(struct omap_sham_hmac_ctx),
1626 		.cra_alignmask		= OMAP_ALIGN_MASK,
1627 		.cra_module		= THIS_MODULE,
1628 		.cra_init		= omap_sham_cra_sha224_init,
1629 		.cra_exit		= omap_sham_cra_exit,
1630 	}
1631 },
1632 {
1633 	.init		= omap_sham_init,
1634 	.update		= omap_sham_update,
1635 	.final		= omap_sham_final,
1636 	.finup		= omap_sham_finup,
1637 	.digest		= omap_sham_digest,
1638 	.setkey		= omap_sham_setkey,
1639 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1640 	.halg.base	= {
1641 		.cra_name		= "hmac(sha256)",
1642 		.cra_driver_name	= "omap-hmac-sha256",
1643 		.cra_priority		= 400,
1644 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1645 						CRYPTO_ALG_ASYNC |
1646 						CRYPTO_ALG_NEED_FALLBACK,
1647 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1648 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1649 					sizeof(struct omap_sham_hmac_ctx),
1650 		.cra_alignmask		= OMAP_ALIGN_MASK,
1651 		.cra_module		= THIS_MODULE,
1652 		.cra_init		= omap_sham_cra_sha256_init,
1653 		.cra_exit		= omap_sham_cra_exit,
1654 	}
1655 },
1656 };
1657 
1658 static struct ahash_alg algs_sha384_sha512[] = {
1659 {
1660 	.init		= omap_sham_init,
1661 	.update		= omap_sham_update,
1662 	.final		= omap_sham_final,
1663 	.finup		= omap_sham_finup,
1664 	.digest		= omap_sham_digest,
1665 	.halg.digestsize	= SHA384_DIGEST_SIZE,
1666 	.halg.base	= {
1667 		.cra_name		= "sha384",
1668 		.cra_driver_name	= "omap-sha384",
1669 		.cra_priority		= 400,
1670 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1671 						CRYPTO_ALG_ASYNC |
1672 						CRYPTO_ALG_NEED_FALLBACK,
1673 		.cra_blocksize		= SHA384_BLOCK_SIZE,
1674 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1675 		.cra_alignmask		= OMAP_ALIGN_MASK,
1676 		.cra_module		= THIS_MODULE,
1677 		.cra_init		= omap_sham_cra_init,
1678 		.cra_exit		= omap_sham_cra_exit,
1679 	}
1680 },
1681 {
1682 	.init		= omap_sham_init,
1683 	.update		= omap_sham_update,
1684 	.final		= omap_sham_final,
1685 	.finup		= omap_sham_finup,
1686 	.digest		= omap_sham_digest,
1687 	.halg.digestsize	= SHA512_DIGEST_SIZE,
1688 	.halg.base	= {
1689 		.cra_name		= "sha512",
1690 		.cra_driver_name	= "omap-sha512",
1691 		.cra_priority		= 400,
1692 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1693 						CRYPTO_ALG_ASYNC |
1694 						CRYPTO_ALG_NEED_FALLBACK,
1695 		.cra_blocksize		= SHA512_BLOCK_SIZE,
1696 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1697 		.cra_alignmask		= OMAP_ALIGN_MASK,
1698 		.cra_module		= THIS_MODULE,
1699 		.cra_init		= omap_sham_cra_init,
1700 		.cra_exit		= omap_sham_cra_exit,
1701 	}
1702 },
1703 {
1704 	.init		= omap_sham_init,
1705 	.update		= omap_sham_update,
1706 	.final		= omap_sham_final,
1707 	.finup		= omap_sham_finup,
1708 	.digest		= omap_sham_digest,
1709 	.setkey		= omap_sham_setkey,
1710 	.halg.digestsize	= SHA384_DIGEST_SIZE,
1711 	.halg.base	= {
1712 		.cra_name		= "hmac(sha384)",
1713 		.cra_driver_name	= "omap-hmac-sha384",
1714 		.cra_priority		= 400,
1715 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1716 						CRYPTO_ALG_ASYNC |
1717 						CRYPTO_ALG_NEED_FALLBACK,
1718 		.cra_blocksize		= SHA384_BLOCK_SIZE,
1719 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1720 					sizeof(struct omap_sham_hmac_ctx),
1721 		.cra_alignmask		= OMAP_ALIGN_MASK,
1722 		.cra_module		= THIS_MODULE,
1723 		.cra_init		= omap_sham_cra_sha384_init,
1724 		.cra_exit		= omap_sham_cra_exit,
1725 	}
1726 },
1727 {
1728 	.init		= omap_sham_init,
1729 	.update		= omap_sham_update,
1730 	.final		= omap_sham_final,
1731 	.finup		= omap_sham_finup,
1732 	.digest		= omap_sham_digest,
1733 	.setkey		= omap_sham_setkey,
1734 	.halg.digestsize	= SHA512_DIGEST_SIZE,
1735 	.halg.base	= {
1736 		.cra_name		= "hmac(sha512)",
1737 		.cra_driver_name	= "omap-hmac-sha512",
1738 		.cra_priority		= 400,
1739 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1740 						CRYPTO_ALG_ASYNC |
1741 						CRYPTO_ALG_NEED_FALLBACK,
1742 		.cra_blocksize		= SHA512_BLOCK_SIZE,
1743 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1744 					sizeof(struct omap_sham_hmac_ctx),
1745 		.cra_alignmask		= OMAP_ALIGN_MASK,
1746 		.cra_module		= THIS_MODULE,
1747 		.cra_init		= omap_sham_cra_sha512_init,
1748 		.cra_exit		= omap_sham_cra_exit,
1749 	}
1750 },
1751 };
1752 
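/*
 * Tasklet bottom half: for CPU transfers, finish once OUTPUT_READY has
 * been seen; for DMA, tear down the mapping first, then finish when
 * the (semi-)final digest is ready. When idle, just retry the queue.
 */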
1753 static void omap_sham_done_task(unsigned long data)
1754 {
1755 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1756 	int err = 0;
1757 
1758 	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1759 
1760 	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1761 		omap_sham_handle_queue(dd, NULL);
1762 		return;
1763 	}
1764 
1765 	if (test_bit(FLAGS_CPU, &dd->flags)) {
1766 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1767 			goto finish;
1768 	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1769 		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1770 			omap_sham_update_dma_stop(dd);
1771 			if (dd->err) {
1772 				err = dd->err;
1773 				goto finish;
1774 			}
1775 		}
1776 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1777 			/* hash or semi-hash ready */
1778 			clear_bit(FLAGS_DMA_READY, &dd->flags);
1779 			goto finish;
1780 		}
1781 	}
1782 
1783 	return;
1784 
1785 finish:
1786 	dev_dbg(dd->dev, "update done: err: %d\n", err);
1787 	/* finish current request */
1788 	omap_sham_finish_req(dd->req, err);
1789 
1790 	/* If we are not busy, process next req */
1791 	if (!test_bit(FLAGS_BUSY, &dd->flags))
1792 		omap_sham_handle_queue(dd, NULL);
1793 }
1794 
1795 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1796 {
1797 	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1798 		dev_warn(dd->dev, "Interrupt when no active requests.\n");
1799 	} else {
1800 		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1801 		tasklet_schedule(&dd->done_task);
1802 	}
1803 
1804 	return IRQ_HANDLED;
1805 }
1806 
1807 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1808 {
1809 	struct omap_sham_dev *dd = dev_id;
1810 
1811 	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1812 		/* final -> allow device to go to power-saving mode */
1813 		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1814 
1815 	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1816 				 SHA_REG_CTRL_OUTPUT_READY);
1817 	omap_sham_read(dd, SHA_REG_CTRL);
1818 
1819 	return omap_sham_irq_common(dd);
1820 }
1821 
1822 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1823 {
1824 	struct omap_sham_dev *dd = dev_id;
1825 
1826 	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1827 
1828 	return omap_sham_irq_common(dd);
1829 }
1830 
1831 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1832 	{
1833 		.algs_list	= algs_sha1_md5,
1834 		.size		= ARRAY_SIZE(algs_sha1_md5),
1835 	},
1836 };
1837 
1838 static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1839 	.algs_info	= omap_sham_algs_info_omap2,
1840 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
1841 	.flags		= BIT(FLAGS_BE32_SHA1),
1842 	.digest_size	= SHA1_DIGEST_SIZE,
1843 	.copy_hash	= omap_sham_copy_hash_omap2,
1844 	.write_ctrl	= omap_sham_write_ctrl_omap2,
1845 	.trigger	= omap_sham_trigger_omap2,
1846 	.poll_irq	= omap_sham_poll_irq_omap2,
1847 	.intr_hdlr	= omap_sham_irq_omap2,
1848 	.idigest_ofs	= 0x00,
1849 	.din_ofs	= 0x1c,
1850 	.digcnt_ofs	= 0x14,
1851 	.rev_ofs	= 0x5c,
1852 	.mask_ofs	= 0x60,
1853 	.sysstatus_ofs	= 0x64,
1854 	.major_mask	= 0xf0,
1855 	.major_shift	= 4,
1856 	.minor_mask	= 0x0f,
1857 	.minor_shift	= 0,
1858 };
1859 
1860 #ifdef CONFIG_OF
1861 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1862 	{
1863 		.algs_list	= algs_sha1_md5,
1864 		.size		= ARRAY_SIZE(algs_sha1_md5),
1865 	},
1866 	{
1867 		.algs_list	= algs_sha224_sha256,
1868 		.size		= ARRAY_SIZE(algs_sha224_sha256),
1869 	},
1870 };
1871 
1872 static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1873 	.algs_info	= omap_sham_algs_info_omap4,
1874 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
1875 	.flags		= BIT(FLAGS_AUTO_XOR),
1876 	.digest_size	= SHA256_DIGEST_SIZE,
1877 	.copy_hash	= omap_sham_copy_hash_omap4,
1878 	.write_ctrl	= omap_sham_write_ctrl_omap4,
1879 	.trigger	= omap_sham_trigger_omap4,
1880 	.poll_irq	= omap_sham_poll_irq_omap4,
1881 	.intr_hdlr	= omap_sham_irq_omap4,
1882 	.idigest_ofs	= 0x020,
1883 	.odigest_ofs	= 0x0,
1884 	.din_ofs	= 0x080,
1885 	.digcnt_ofs	= 0x040,
1886 	.rev_ofs	= 0x100,
1887 	.mask_ofs	= 0x110,
1888 	.sysstatus_ofs	= 0x114,
1889 	.mode_ofs	= 0x44,
1890 	.length_ofs	= 0x48,
1891 	.major_mask	= 0x0700,
1892 	.major_shift	= 8,
1893 	.minor_mask	= 0x003f,
1894 	.minor_shift	= 0,
1895 };
1896 
1897 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1898 	{
1899 		.algs_list	= algs_sha1_md5,
1900 		.size		= ARRAY_SIZE(algs_sha1_md5),
1901 	},
1902 	{
1903 		.algs_list	= algs_sha224_sha256,
1904 		.size		= ARRAY_SIZE(algs_sha224_sha256),
1905 	},
1906 	{
1907 		.algs_list	= algs_sha384_sha512,
1908 		.size		= ARRAY_SIZE(algs_sha384_sha512),
1909 	},
1910 };
1911 
1912 static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1913 	.algs_info	= omap_sham_algs_info_omap5,
1914 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
1915 	.flags		= BIT(FLAGS_AUTO_XOR),
1916 	.digest_size	= SHA512_DIGEST_SIZE,
1917 	.copy_hash	= omap_sham_copy_hash_omap4,
1918 	.write_ctrl	= omap_sham_write_ctrl_omap4,
1919 	.trigger	= omap_sham_trigger_omap4,
1920 	.poll_irq	= omap_sham_poll_irq_omap4,
1921 	.intr_hdlr	= omap_sham_irq_omap4,
1922 	.idigest_ofs	= 0x240,
1923 	.odigest_ofs	= 0x200,
1924 	.din_ofs	= 0x080,
1925 	.digcnt_ofs	= 0x280,
1926 	.rev_ofs	= 0x100,
1927 	.mask_ofs	= 0x110,
1928 	.sysstatus_ofs	= 0x114,
1929 	.mode_ofs	= 0x284,
1930 	.length_ofs	= 0x288,
1931 	.major_mask	= 0x0700,
1932 	.major_shift	= 8,
1933 	.minor_mask	= 0x003f,
1934 	.minor_shift	= 0,
1935 };
1936 
1937 static const struct of_device_id omap_sham_of_match[] = {
1938 	{
1939 		.compatible	= "ti,omap2-sham",
1940 		.data		= &omap_sham_pdata_omap2,
1941 	},
1942 	{
1943 		.compatible	= "ti,omap3-sham",
1944 		.data		= &omap_sham_pdata_omap2,
1945 	},
1946 	{
1947 		.compatible	= "ti,omap4-sham",
1948 		.data		= &omap_sham_pdata_omap4,
1949 	},
1950 	{
1951 		.compatible	= "ti,omap5-sham",
1952 		.data		= &omap_sham_pdata_omap5,
1953 	},
1954 	{},
1955 };
1956 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1957 
1958 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1959 		struct device *dev, struct resource *res)
1960 {
1961 	struct device_node *node = dev->of_node;
1962 	int err = 0;
1963 
1964 	dd->pdata = of_device_get_match_data(dev);
1965 	if (!dd->pdata) {
1966 		dev_err(dev, "no compatible OF match\n");
1967 		err = -EINVAL;
1968 		goto err;
1969 	}
1970 
1971 	err = of_address_to_resource(node, 0, res);
1972 	if (err < 0) {
1973 		dev_err(dev, "can't translate OF node address\n");
1974 		err = -EINVAL;
1975 		goto err;
1976 	}
1977 
1978 	dd->irq = irq_of_parse_and_map(node, 0);
1979 	if (!dd->irq) {
1980 		dev_err(dev, "can't translate OF irq value\n");
1981 		err = -EINVAL;
1982 		goto err;
1983 	}
1984 
1985 err:
1986 	return err;
1987 }
1988 #else
1989 static const struct of_device_id omap_sham_of_match[] = {
1990 	{},
1991 };
1992 
1993 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1994 		struct device *dev, struct resource *res)
1995 {
1996 	return -EINVAL;
1997 }
1998 #endif
1999 
2000 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
2001 		struct platform_device *pdev, struct resource *res)
2002 {
2003 	struct device *dev = &pdev->dev;
2004 	struct resource *r;
2005 	int err = 0;
2006 
2007 	/* Get the base address */
2008 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2009 	if (!r) {
2010 		dev_err(dev, "no MEM resource info\n");
2011 		err = -ENODEV;
2012 		goto err;
2013 	}
2014 	memcpy(res, r, sizeof(*res));
2015 
2016 	/* Get the IRQ */
2017 	dd->irq = platform_get_irq(pdev, 0);
2018 	if (dd->irq < 0) {
2019 		err = dd->irq;
2020 		goto err;
2021 	}
2022 
2023 	/* Only OMAP2/3 can be non-DT */
2024 	dd->pdata = &omap_sham_pdata_omap2;
2025 
2026 err:
2027 	return err;
2028 }
2029 
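/*
 * "fallback" sysfs attribute: hash requests shorter than fallback_sz bytes
 * are handed to the software fallback rather than the accelerator, where
 * the DMA/register setup cost would outweigh any speedup.  A hypothetical
 * use (the exact sysfs path depends on the bus topology):
 *
 *	echo 512 > /sys/bus/platform/devices/.../fallback
 */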
2030 static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
2031 			     char *buf)
2032 {
2033 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2034 
2035 	return sprintf(buf, "%d\n", dd->fallback_sz);
2036 }
2037 
2038 static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
2039 			      const char *buf, size_t size)
2040 {
2041 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2042 	ssize_t status;
2043 	long value;
2044 
2045 	status = kstrtol(buf, 0, &value);
2046 	if (status)
2047 		return status;
2048 
2049 	/* HW accelerator only works with buffers of at least 9 bytes */
2050 	if (value < 9) {
2051 		dev_err(dev, "minimum fallback size 9\n");
2052 		return -EINVAL;
2053 	}
2054 
2055 	dd->fallback_sz = value;
2056 
2057 	return size;
2058 }
2059 
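/*
 * "queue_len" sysfs attribute: caps how many requests may sit in this
 * instance's crypto queue, e.g. (path illustrative):
 *
 *	echo 16 > /sys/bus/platform/devices/.../queue_len
 */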
2060 static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2061 			      char *buf)
2062 {
2063 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2064 
2065 	return sprintf(buf, "%d\n", dd->queue.max_qlen);
2066 }
2067 
2068 static ssize_t queue_len_store(struct device *dev,
2069 			       struct device_attribute *attr, const char *buf,
2070 			       size_t size)
2071 {
2072 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2073 	ssize_t status;
2074 	long value;
2075 	unsigned long flags;
2076 
2077 	status = kstrtol(buf, 0, &value);
2078 	if (status)
2079 		return status;
2080 
2081 	if (value < 1)
2082 		return -EINVAL;
2083 
2084 	/*
2085 	 * Changing the queue size on the fly is safe: if the new size is
2086 	 * smaller than the current size, the queue simply stops accepting
2087 	 * new entries until it has shrunk enough.
2088 	 */
2089 	spin_lock_irqsave(&dd->lock, flags);
2090 	dd->queue.max_qlen = value;
2091 	spin_unlock_irqrestore(&dd->lock, flags);
2092 
2093 	return size;
2094 }
2095 
2096 static DEVICE_ATTR_RW(queue_len);
2097 static DEVICE_ATTR_RW(fallback);
2098 
2099 static struct attribute *omap_sham_attrs[] = {
2100 	&dev_attr_queue_len.attr,
2101 	&dev_attr_fallback.attr,
2102 	NULL,
2103 };
2104 
2105 static const struct attribute_group omap_sham_attr_group = {
2106 	.attrs = omap_sham_attrs,
2107 };
2108 
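/*
 * Probe: map the module, install the IRQ handler, try to grab the "rx" DMA
 * channel (dropping back to polling mode if none is wired up), read the IP
 * revision under runtime PM, and register one ahash algorithm per digest
 * the revision supports.
 */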
2109 static int omap_sham_probe(struct platform_device *pdev)
2110 {
2111 	struct omap_sham_dev *dd;
2112 	struct device *dev = &pdev->dev;
2113 	struct resource res;
2115 	int err, i, j;
2116 	u32 rev;
2117 
2118 	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
2119 	if (!dd) {
2120 		dev_err(dev, "unable to alloc data struct.\n");
2121 		err = -ENOMEM;
2122 		goto data_err;
2123 	}
2124 	dd->dev = dev;
2125 	platform_set_drvdata(pdev, dd);
2126 
2127 	INIT_LIST_HEAD(&dd->list);
2128 	spin_lock_init(&dd->lock);
2129 	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2130 	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2131 
2132 	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2133 			       omap_sham_get_res_pdev(dd, pdev, &res);
2134 	if (err)
2135 		goto data_err;
2136 
2137 	dd->io_base = devm_ioremap_resource(dev, &res);
2138 	if (IS_ERR(dd->io_base)) {
2139 		err = PTR_ERR(dd->io_base);
2140 		goto data_err;
2141 	}
2142 	dd->phys_base = res.start;
2143 
2144 	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2145 			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
2146 	if (err) {
2147 		dev_err(dev, "unable to request irq %d, err = %d\n",
2148 			dd->irq, err);
2149 		goto data_err;
2150 	}
2151 
2155 	dd->dma_lch = dma_request_chan(dev, "rx");
2156 	if (IS_ERR(dd->dma_lch)) {
2157 		err = PTR_ERR(dd->dma_lch);
2158 		if (err == -EPROBE_DEFER)
2159 			goto data_err;
2160 
2161 		dd->polling_mode = 1;
2162 		dev_dbg(dev, "using polling mode instead of dma\n");
2163 	}
2164 
2165 	dd->flags |= dd->pdata->flags;
2166 	sham.flags |= dd->pdata->flags;
2167 
2168 	pm_runtime_use_autosuspend(dev);
2169 	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2170 
2171 	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2172 
2173 	pm_runtime_enable(dev);
2174 	pm_runtime_irq_safe(dev);
2175 
2176 	err = pm_runtime_get_sync(dev);
2177 	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
2178 		dev_err(dev, "failed to get sync: %d\n", err);
2179 		goto err_pm;
2180 	}
2181 
2182 	rev = omap_sham_read(dd, SHA_REG_REV(dd));
2183 	pm_runtime_put_sync(&pdev->dev);
2184 
2185 	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2186 		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2187 		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2188 
2189 	spin_lock(&sham.lock);
2190 	list_add_tail(&dd->list, &sham.dev_list);
2191 	spin_unlock(&sham.lock);
2192 
2193 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
2194 		if (dd->pdata->algs_info[i].registered)
2195 			break;
2196 
2197 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2198 			struct ahash_alg *alg;
2199 
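			/*
			 * export/import must round-trip the whole request
			 * context, including its staging buffer, so the
			 * advertised statesize covers both.
			 */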
2200 			alg = &dd->pdata->algs_info[i].algs_list[j];
2201 			alg->export = omap_sham_export;
2202 			alg->import = omap_sham_import;
2203 			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2204 					      BUFLEN;
2205 			err = crypto_register_ahash(alg);
2206 			if (err)
2207 				goto err_algs;
2208 
2209 			dd->pdata->algs_info[i].registered++;
2210 		}
2211 	}
2212 
2213 	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2214 	if (err) {
2215 		dev_err(dev, "could not create sysfs device attrs\n");
2216 		goto err_algs;
2217 	}
2218 
2219 	return 0;
2220 
2221 err_algs:
2222 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2223 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2224 			crypto_unregister_ahash(
2225 					&dd->pdata->algs_info[i].algs_list[j]);
2226 err_pm:
2227 	pm_runtime_disable(dev);
2228 	if (!dd->polling_mode)
2229 		dma_release_channel(dd->dma_lch);
2230 data_err:
2231 	dev_err(dev, "initialization failed.\n");
2232 
2233 	return err;
2234 }
2235 
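/*
 * Teardown mirrors probe in reverse: drop the device from the global list
 * so no new requests are dispatched to it, unregister the algorithms, kill
 * the completion tasklet, then release PM, DMA and sysfs resources.
 */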
2236 static int omap_sham_remove(struct platform_device *pdev)
2237 {
2238 	struct omap_sham_dev *dd;
2239 	int i, j;
2240 
2241 	dd = platform_get_drvdata(pdev);
2242 	if (!dd)
2243 		return -ENODEV;
2244 	spin_lock(&sham.lock);
2245 	list_del(&dd->list);
2246 	spin_unlock(&sham.lock);
2247 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2248 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2249 			crypto_unregister_ahash(
2250 					&dd->pdata->algs_info[i].algs_list[j]);
2251 			dd->pdata->algs_info[i].registered--;
2252 		}
2253 	tasklet_kill(&dd->done_task);
2254 	pm_runtime_disable(&pdev->dev);
2255 
2256 	if (!dd->polling_mode)
2257 		dma_release_channel(dd->dma_lch);
2258 
2259 	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2260 
2261 	return 0;
2262 }
2263 
2264 #ifdef CONFIG_PM_SLEEP
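/*
 * System sleep only needs to drop/reacquire the runtime PM reference: the
 * driver keeps the intermediate digest in the request context and rewrites
 * it to the IP on the next operation, so no register state is saved here.
 */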
2265 static int omap_sham_suspend(struct device *dev)
2266 {
2267 	pm_runtime_put_sync(dev);
2268 	return 0;
2269 }
2270 
2271 static int omap_sham_resume(struct device *dev)
2272 {
2273 	int err = pm_runtime_get_sync(dev);
2274 	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
2275 		dev_err(dev, "failed to get sync: %d\n", err);
2276 		return err;
2277 	}
2278 	return 0;
2279 }
2280 #endif
2281 
2282 static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
2283 
2284 static struct platform_driver omap_sham_driver = {
2285 	.probe	= omap_sham_probe,
2286 	.remove	= omap_sham_remove,
2287 	.driver	= {
2288 		.name	= "omap-sham",
2289 		.pm	= &omap_sham_pm_ops,
2290 		.of_match_table	= omap_sham_of_match,
2291 	},
2292 };
2293 
2294 module_platform_driver(omap_sham_driver);
2295 
2296 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2297 MODULE_LICENSE("GPL v2");
2298 MODULE_AUTHOR("Dmitry Kasatkin");
2299 MODULE_ALIAS("platform:omap-sham");
2300