xref: /openbmc/linux/drivers/crypto/sahara.c (revision b78412b8)
1 /*
2  * Cryptographic API.
3  *
4  * Support for SAHARA cryptographic accelerator.
5  *
6  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
7  * Copyright (c) 2013 Vista Silicon S.L.
8  * Author: Javier Martin <javier.martin@vista-silicon.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as published
12  * by the Free Software Foundation.
13  *
14  * Based on omap-aes.c and tegra-aes.c
15  */
16 
17 #include <crypto/aes.h>
18 #include <crypto/internal/hash.h>
19 #include <crypto/internal/skcipher.h>
20 #include <crypto/scatterwalk.h>
21 #include <crypto/sha.h>
22 
23 #include <linux/clk.h>
24 #include <linux/crypto.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/irq.h>
28 #include <linux/kernel.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/mutex.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/platform_device.h>
35 
36 #define SHA_BUFFER_LEN		PAGE_SIZE
37 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
38 
39 #define SAHARA_NAME "sahara"
40 #define SAHARA_VERSION_3	3
41 #define SAHARA_VERSION_4	4
42 #define SAHARA_TIMEOUT_MS	1000
43 #define SAHARA_MAX_HW_DESC	2
44 #define SAHARA_MAX_HW_LINK	20
45 
46 #define FLAGS_MODE_MASK		0x000f
47 #define FLAGS_ENCRYPT		BIT(0)
48 #define FLAGS_CBC		BIT(1)
49 #define FLAGS_NEW_KEY		BIT(3)
50 
51 #define SAHARA_HDR_BASE			0x00800000
52 #define SAHARA_HDR_SKHA_ALG_AES	0
53 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
54 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
55 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
56 #define SAHARA_HDR_FORM_DATA		(5 << 16)
57 #define SAHARA_HDR_FORM_KEY		(8 << 16)
58 #define SAHARA_HDR_LLO			(1 << 24)
59 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
60 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
61 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
62 
63 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
64 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
65 #define SAHARA_HDR_MDHA_HASH		0xA0850000
66 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
67 #define SAHARA_HDR_MDHA_ALG_SHA1	0
68 #define SAHARA_HDR_MDHA_ALG_MD5		1
69 #define SAHARA_HDR_MDHA_ALG_SHA256	2
70 #define SAHARA_HDR_MDHA_ALG_SHA224	3
71 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
72 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
73 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
74 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
75 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
76 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
77 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
78 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
79 
80 /* SAHARA can only process one request at a time */
81 #define SAHARA_QUEUE_LENGTH	1
82 
83 #define SAHARA_REG_VERSION	0x00
84 #define SAHARA_REG_DAR		0x04
85 #define SAHARA_REG_CONTROL	0x08
86 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
87 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
88 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
89 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
90 #define SAHARA_REG_CMD		0x0C
91 #define		SAHARA_CMD_RESET		(1 << 0)
92 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
93 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
94 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
95 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
96 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
97 #define	SAHARA_REG_STATUS	0x10
98 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
99 #define			SAHARA_STATE_IDLE	0
100 #define			SAHARA_STATE_BUSY	1
101 #define			SAHARA_STATE_ERR	2
102 #define			SAHARA_STATE_FAULT	3
103 #define			SAHARA_STATE_COMPLETE	4
104 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
105 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
106 #define		SAHARA_STATUS_ERROR		(1 << 4)
107 #define		SAHARA_STATUS_SECURE		(1 << 5)
108 #define		SAHARA_STATUS_FAIL		(1 << 6)
109 #define		SAHARA_STATUS_INIT		(1 << 7)
110 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
111 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
112 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
113 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
114 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
115 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
116 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
117 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
118 #define SAHARA_REG_ERRSTATUS	0x14
119 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
120 #define			SAHARA_ERRSOURCE_CHA	14
121 #define			SAHARA_ERRSOURCE_DMA	15
122 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
123 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
124 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
125 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
126 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
127 #define SAHARA_REG_FADDR	0x18
128 #define SAHARA_REG_CDAR		0x1C
129 #define SAHARA_REG_IDAR		0x20
130 
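/*
 * The two structures below mirror the descriptor and link layouts that the
 * SAHARA DMA engine fetches from coherent memory (programmed via
 * SAHARA_REG_DAR), so their field order and sizes must match what the
 * hardware expects.
 */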
131 struct sahara_hw_desc {
132 	u32	hdr;
133 	u32	len1;
134 	u32	p1;
135 	u32	len2;
136 	u32	p2;
137 	u32	next;
138 };
139 
140 struct sahara_hw_link {
141 	u32	len;
142 	u32	p;
143 	u32	next;
144 };
145 
146 struct sahara_ctx {
147 	unsigned long flags;
148 
149 	/* AES-specific context */
150 	int keylen;
151 	u8 key[AES_KEYSIZE_128];
152 	struct crypto_skcipher *fallback;
153 };
154 
155 struct sahara_aes_reqctx {
156 	unsigned long mode;
157 };
158 
159 /**
160  * struct sahara_sha_reqctx - private data per request
161  * @buf: holds data for requests smaller than block_size
162  * @rembuf: used to prepare one block_size-aligned request
163  * @context: hw-specific context for request. Digest is extracted from this
164  * @mode: specifies what type of hw-descriptor needs to be built
165  * @digest_size: length of digest for this request
166  * @context_size: length of hw-context for this request.
167  *                Always digest_size + 4
168  * @buf_cnt: number of bytes saved in buf
169  * @sg_in_idx: index of the hw descriptor referencing the input links, 0 if none
170  * @in_sg: scatterlist for input data
171  * @in_sg_chain: scatterlists for chained input data
172  * @total: total number of bytes for transfer
173  * @last: is this the last block
174  * @first: is this the first block
175  * @active: inside a transfer
176  */
177 struct sahara_sha_reqctx {
178 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
179 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
180 	u8			context[SHA256_DIGEST_SIZE + 4];
181 	unsigned int		mode;
182 	unsigned int		digest_size;
183 	unsigned int		context_size;
184 	unsigned int		buf_cnt;
185 	unsigned int		sg_in_idx;
186 	struct scatterlist	*in_sg;
187 	struct scatterlist	in_sg_chain[2];
188 	size_t			total;
189 	unsigned int		last;
190 	unsigned int		first;
191 	unsigned int		active;
192 };
193 
194 struct sahara_dev {
195 	struct device		*device;
196 	unsigned int		version;
197 	void __iomem		*regs_base;
198 	struct clk		*clk_ipg;
199 	struct clk		*clk_ahb;
200 	struct mutex		queue_mutex;
201 	struct task_struct	*kthread;
202 	struct completion	dma_completion;
203 
204 	struct sahara_ctx	*ctx;
205 	struct crypto_queue	queue;
206 	unsigned long		flags;
207 
208 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
209 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
210 
211 	u8			*key_base;
212 	dma_addr_t		key_phys_base;
213 
214 	u8			*iv_base;
215 	dma_addr_t		iv_phys_base;
216 
217 	u8			*context_base;
218 	dma_addr_t		context_phys_base;
219 
220 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
221 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
222 
223 	size_t			total;
224 	struct scatterlist	*in_sg;
225 	int		nb_in_sg;
226 	struct scatterlist	*out_sg;
227 	int		nb_out_sg;
228 
229 	u32			error;
230 };
231 
232 static struct sahara_dev *dev_ptr;
233 
234 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
235 {
236 	writel(data, dev->regs_base + reg);
237 }
238 
239 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
240 {
241 	return readl(dev->regs_base + reg);
242 }
243 
244 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
245 {
246 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
247 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
248 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
249 
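	/*
	 * Adding a mode bit changes the overall parity of the header, so the
	 * parity bit is toggled as well to keep the header at odd parity,
	 * the same invariant sahara_sha_init_hdr() enforces with its
	 * hweight check.
	 */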
250 	if (dev->flags & FLAGS_CBC) {
251 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
252 		hdr ^= SAHARA_HDR_PARITY_BIT;
253 	}
254 
255 	if (dev->flags & FLAGS_ENCRYPT) {
256 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
257 		hdr ^= SAHARA_HDR_PARITY_BIT;
258 	}
259 
260 	return hdr;
261 }
262 
263 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
264 {
265 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
266 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
267 }
268 
269 static const char *sahara_err_src[16] = {
270 	"No error",
271 	"Header error",
272 	"Descriptor length error",
273 	"Descriptor length or pointer error",
274 	"Link length error",
275 	"Link pointer error",
276 	"Input buffer error",
277 	"Output buffer error",
278 	"Output buffer starvation",
279 	"Internal state fault",
280 	"General descriptor problem",
281 	"Reserved",
282 	"Descriptor address error",
283 	"Link address error",
284 	"CHA error",
285 	"DMA error"
286 };
287 
288 static const char *sahara_err_dmasize[4] = {
289 	"Byte transfer",
290 	"Half-word transfer",
291 	"Word transfer",
292 	"Reserved"
293 };
294 
295 static const char *sahara_err_dmasrc[8] = {
296 	"No error",
297 	"AHB bus error",
298 	"Internal IP bus error",
299 	"Parity error",
300 	"DMA crosses 256 byte boundary",
301 	"DMA is busy",
302 	"Reserved",
303 	"DMA HW error"
304 };
305 
306 static const char *sahara_cha_errsrc[12] = {
307 	"Input buffer non-empty",
308 	"Illegal address",
309 	"Illegal mode",
310 	"Illegal data size",
311 	"Illegal key size",
312 	"Write during processing",
313 	"CTX read during processing",
314 	"HW error",
315 	"Input buffer disabled/underflow",
316 	"Output buffer disabled/overflow",
317 	"DES key parity error",
318 	"Reserved"
319 };
320 
321 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
322 
323 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
324 {
325 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
326 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
327 
328 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
329 
330 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
331 
332 	if (source == SAHARA_ERRSOURCE_DMA) {
333 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
334 			dev_err(dev->device, "		* DMA read.\n");
335 		else
336 			dev_err(dev->device, "		* DMA write.\n");
337 
338 		dev_err(dev->device, "		* %s.\n",
339 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
340 		dev_err(dev->device, "		* %s.\n",
341 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
342 	} else if (source == SAHARA_ERRSOURCE_CHA) {
343 		dev_err(dev->device, "		* %s.\n",
344 			sahara_cha_errsrc[chasrc]);
345 		dev_err(dev->device, "		* %s.\n",
346 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
347 	}
348 	dev_err(dev->device, "\n");
349 }
350 
351 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
352 
353 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
354 {
355 	u8 state;
356 
357 	if (!IS_ENABLED(DEBUG))
358 		return;
359 
360 	state = SAHARA_STATUS_GET_STATE(status);
361 
362 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
363 		__func__, status);
364 
365 	dev_dbg(dev->device, "	- State = %d:\n", state);
366 	if (state & SAHARA_STATE_COMP_FLAG)
367 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
368 
369 	dev_dbg(dev->device, "		* %s.\n",
370 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
371 
372 	if (status & SAHARA_STATUS_DAR_FULL)
373 		dev_dbg(dev->device, "	- DAR Full.\n");
374 	if (status & SAHARA_STATUS_ERROR)
375 		dev_dbg(dev->device, "	- Error.\n");
376 	if (status & SAHARA_STATUS_SECURE)
377 		dev_dbg(dev->device, "	- Secure.\n");
378 	if (status & SAHARA_STATUS_FAIL)
379 		dev_dbg(dev->device, "	- Fail.\n");
380 	if (status & SAHARA_STATUS_RNG_RESEED)
381 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
382 	if (status & SAHARA_STATUS_ACTIVE_RNG)
383 		dev_dbg(dev->device, "	- RNG Active.\n");
384 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
385 		dev_dbg(dev->device, "	- MDHA Active.\n");
386 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
387 		dev_dbg(dev->device, "	- SKHA Active.\n");
388 
389 	if (status & SAHARA_STATUS_MODE_BATCH)
390 		dev_dbg(dev->device, "	- Batch Mode.\n");
391 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
392 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
393 	else if (status & SAHARA_STATUS_MODE_DEBUG)
394 		dev_dbg(dev->device, "	- Debug Mode.\n");
395 
396 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
397 	       SAHARA_STATUS_GET_ISTATE(status));
398 
399 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
400 		sahara_read(dev, SAHARA_REG_CDAR));
401 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
402 		sahara_read(dev, SAHARA_REG_IDAR));
403 }
404 
405 static void sahara_dump_descriptors(struct sahara_dev *dev)
406 {
407 	int i;
408 
409 	if (!IS_ENABLED(DEBUG))
410 		return;
411 
412 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
413 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
414 			i, &dev->hw_phys_desc[i]);
415 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
416 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
417 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
418 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
419 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
420 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
421 			dev->hw_desc[i]->next);
422 	}
423 	dev_dbg(dev->device, "\n");
424 }
425 
426 static void sahara_dump_links(struct sahara_dev *dev)
427 {
428 	int i;
429 
430 	if (!IS_ENABLED(DEBUG))
431 		return;
432 
433 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
434 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
435 			i, &dev->hw_phys_link[i]);
436 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
437 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
438 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
439 			dev->hw_link[i]->next);
440 	}
441 	dev_dbg(dev->device, "\n");
442 }
443 
444 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
445 {
446 	struct sahara_ctx *ctx = dev->ctx;
447 	struct scatterlist *sg;
448 	int ret;
449 	int i, j;
450 	int idx = 0;
451 
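	/*
	 * Build up to two chained descriptors: an optional key/IV load
	 * descriptor (only when a new key was programmed) followed by the
	 * data descriptor that points at the input and output link chains.
	 */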
452 	/* Copy new key if necessary */
453 	if (ctx->flags & FLAGS_NEW_KEY) {
454 		memcpy(dev->key_base, ctx->key, ctx->keylen);
455 		ctx->flags &= ~FLAGS_NEW_KEY;
456 
457 		if (dev->flags & FLAGS_CBC) {
458 			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
459 			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
460 		} else {
461 			dev->hw_desc[idx]->len1 = 0;
462 			dev->hw_desc[idx]->p1 = 0;
463 		}
464 		dev->hw_desc[idx]->len2 = ctx->keylen;
465 		dev->hw_desc[idx]->p2 = dev->key_phys_base;
466 		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
467 
468 		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
469 
470 		idx++;
471 	}
472 
473 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
474 	if (dev->nb_in_sg < 0) {
475 		dev_err(dev->device, "Invalid numbers of src SG.\n");
476 		return dev->nb_in_sg;
477 	}
478 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
479 	if (dev->nb_out_sg < 0) {
480 		dev_err(dev->device, "Invalid numbers of dst SG.\n");
481 		return dev->nb_out_sg;
482 	}
483 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
484 		dev_err(dev->device, "not enough hw links (%d)\n",
485 			dev->nb_in_sg + dev->nb_out_sg);
486 		return -EINVAL;
487 	}
488 
489 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
490 			 DMA_TO_DEVICE);
491 	if (ret != dev->nb_in_sg) {
492 		dev_err(dev->device, "couldn't map in sg\n");
493 		return -EINVAL;
494 	}
495 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
496 			 DMA_FROM_DEVICE);
497 	if (ret != dev->nb_out_sg) {
498 		dev_err(dev->device, "couldn't map out sg\n");
499 		goto unmap_in;
500 	}
501 
502 	/* Create input links */
503 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
504 	sg = dev->in_sg;
505 	for (i = 0; i < dev->nb_in_sg; i++) {
506 		dev->hw_link[i]->len = sg->length;
507 		dev->hw_link[i]->p = sg->dma_address;
508 		if (i == (dev->nb_in_sg - 1)) {
509 			dev->hw_link[i]->next = 0;
510 		} else {
511 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
512 			sg = sg_next(sg);
513 		}
514 	}
515 
516 	/* Create output links */
517 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
518 	sg = dev->out_sg;
519 	for (j = i; j < dev->nb_out_sg + i; j++) {
520 		dev->hw_link[j]->len = sg->length;
521 		dev->hw_link[j]->p = sg->dma_address;
522 		if (j == (dev->nb_out_sg + i - 1)) {
523 			dev->hw_link[j]->next = 0;
524 		} else {
525 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
526 			sg = sg_next(sg);
527 		}
528 	}
529 
530 	/* Fill remaining fields of the data descriptor */
531 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
532 	dev->hw_desc[idx]->len1 = dev->total;
533 	dev->hw_desc[idx]->len2 = dev->total;
534 	dev->hw_desc[idx]->next = 0;
535 
536 	sahara_dump_descriptors(dev);
537 	sahara_dump_links(dev);
538 
539 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
540 
541 	return 0;
542 
546 unmap_in:
547 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
548 		DMA_TO_DEVICE);
549 
550 	return -EINVAL;
551 }
552 
553 static int sahara_aes_process(struct ablkcipher_request *req)
554 {
555 	struct sahara_dev *dev = dev_ptr;
556 	struct sahara_ctx *ctx;
557 	struct sahara_aes_reqctx *rctx;
558 	int ret;
559 	unsigned long timeout;
560 
561 	/* Request is ready to be dispatched by the device */
562 	dev_dbg(dev->device,
563 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
564 		req->nbytes, req->src, req->dst);
565 
566 	/* assign new request to device */
567 	dev->total = req->nbytes;
568 	dev->in_sg = req->src;
569 	dev->out_sg = req->dst;
570 
571 	rctx = ablkcipher_request_ctx(req);
572 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
573 	rctx->mode &= FLAGS_MODE_MASK;
574 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
575 
576 	if ((dev->flags & FLAGS_CBC) && req->info)
577 		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
578 
579 	/* assign new context to device */
580 	dev->ctx = ctx;
581 
582 	reinit_completion(&dev->dma_completion);
583 
584 	ret = sahara_hw_descriptor_create(dev);
585 	if (ret)
586 		return -EINVAL;
587 
588 	timeout = wait_for_completion_timeout(&dev->dma_completion,
589 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
590 	if (!timeout) {
591 		dev_err(dev->device, "AES timeout\n");
592 		return -ETIMEDOUT;
593 	}
594 
595 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
596 		DMA_FROM_DEVICE);
597 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
598 		DMA_TO_DEVICE);
599 
600 	return 0;
601 }
602 
603 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
604 			     unsigned int keylen)
605 {
606 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
607 	int ret;
608 
609 	ctx->keylen = keylen;
610 
611 	/* SAHARA only supports 128-bit keys */
612 	if (keylen == AES_KEYSIZE_128) {
613 		memcpy(ctx->key, key, keylen);
614 		ctx->flags |= FLAGS_NEW_KEY;
615 		return 0;
616 	}
617 
618 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
619 		return -EINVAL;
620 
621 	/*
622 	 * The requested key size is not supported by HW, do a fallback.
623 	 */
624 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
625 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
626 						 CRYPTO_TFM_REQ_MASK);
627 
628 	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
629 
630 	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
631 	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
632 			       CRYPTO_TFM_RES_MASK;
633 	return ret;
634 }
635 
636 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
637 {
638 	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
639 	struct sahara_dev *dev = dev_ptr;
640 	int err = 0;
641 
642 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
643 		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
644 
645 	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
646 		dev_err(dev->device,
647 			"request size is not a multiple of the AES block size\n");
648 		return -EINVAL;
649 	}
650 
651 	rctx->mode = mode;
652 
653 	mutex_lock(&dev->queue_mutex);
654 	err = ablkcipher_enqueue_request(&dev->queue, req);
655 	mutex_unlock(&dev->queue_mutex);
656 
657 	wake_up_process(dev->kthread);
658 
659 	return err;
660 }
661 
662 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
663 {
664 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
665 		crypto_ablkcipher_reqtfm(req));
666 	int err;
667 
668 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
669 		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
670 
671 		skcipher_request_set_tfm(subreq, ctx->fallback);
672 		skcipher_request_set_callback(subreq, req->base.flags,
673 					      NULL, NULL);
674 		skcipher_request_set_crypt(subreq, req->src, req->dst,
675 					   req->nbytes, req->info);
676 		err = crypto_skcipher_encrypt(subreq);
677 		skcipher_request_zero(subreq);
678 		return err;
679 	}
680 
681 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
682 }
683 
684 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
685 {
686 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
687 		crypto_ablkcipher_reqtfm(req));
688 	int err;
689 
690 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
691 		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
692 
693 		skcipher_request_set_tfm(subreq, ctx->fallback);
694 		skcipher_request_set_callback(subreq, req->base.flags,
695 					      NULL, NULL);
696 		skcipher_request_set_crypt(subreq, req->src, req->dst,
697 					   req->nbytes, req->info);
698 		err = crypto_skcipher_decrypt(subreq);
699 		skcipher_request_zero(subreq);
700 		return err;
701 	}
702 
703 	return sahara_aes_crypt(req, 0);
704 }
705 
706 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
707 {
708 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
709 		crypto_ablkcipher_reqtfm(req));
710 	int err;
711 
712 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
713 		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
714 
715 		skcipher_request_set_tfm(subreq, ctx->fallback);
716 		skcipher_request_set_callback(subreq, req->base.flags,
717 					      NULL, NULL);
718 		skcipher_request_set_crypt(subreq, req->src, req->dst,
719 					   req->nbytes, req->info);
720 		err = crypto_skcipher_encrypt(subreq);
721 		skcipher_request_zero(subreq);
722 		return err;
723 	}
724 
725 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
726 }
727 
728 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
729 {
730 	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
731 		crypto_ablkcipher_reqtfm(req));
732 	int err;
733 
734 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
735 		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
736 
737 		skcipher_request_set_tfm(subreq, ctx->fallback);
738 		skcipher_request_set_callback(subreq, req->base.flags,
739 					      NULL, NULL);
740 		skcipher_request_set_crypt(subreq, req->src, req->dst,
741 					   req->nbytes, req->info);
742 		err = crypto_skcipher_decrypt(subreq);
743 		skcipher_request_zero(subreq);
744 		return err;
745 	}
746 
747 	return sahara_aes_crypt(req, FLAGS_CBC);
748 }
749 
750 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
751 {
752 	const char *name = crypto_tfm_alg_name(tfm);
753 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
754 
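	/*
	 * Allocate a software fallback up front; it is only used for the
	 * 192/256-bit key sizes that the hardware cannot handle itself
	 * (see sahara_aes_setkey()).
	 */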
755 	ctx->fallback = crypto_alloc_skcipher(name, 0,
756 					      CRYPTO_ALG_ASYNC |
757 					      CRYPTO_ALG_NEED_FALLBACK);
758 	if (IS_ERR(ctx->fallback)) {
759 		pr_err("Error allocating fallback algo %s\n", name);
760 		return PTR_ERR(ctx->fallback);
761 	}
762 
763 	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
764 
765 	return 0;
766 }
767 
768 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
769 {
770 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
771 
772 	crypto_free_skcipher(ctx->fallback);
773 }
774 
775 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
776 			      struct sahara_sha_reqctx *rctx)
777 {
778 	u32 hdr = 0;
779 
780 	hdr = rctx->mode;
781 
782 	if (rctx->first) {
783 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
784 		hdr |= SAHARA_HDR_MDHA_INIT;
785 	} else {
786 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
787 	}
788 
789 	if (rctx->last)
790 		hdr |= SAHARA_HDR_MDHA_PDATA;
791 
792 	if (hweight_long(hdr) % 2 == 0)
793 		hdr |= SAHARA_HDR_PARITY_BIT;
794 
795 	return hdr;
796 }
797 
798 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
799 				       struct sahara_sha_reqctx *rctx,
800 				       int start)
801 {
802 	struct scatterlist *sg;
803 	unsigned int i;
804 	int ret;
805 
806 	dev->in_sg = rctx->in_sg;
807 
808 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
809 	if (dev->nb_in_sg < 0) {
810 		dev_err(dev->device, "Invalid numbers of src SG.\n");
811 		return dev->nb_in_sg;
812 	}
813 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
814 		dev_err(dev->device, "not enough hw links (%d)\n",
815 			dev->nb_in_sg);
816 		return -EINVAL;
817 	}
818 
819 	sg = dev->in_sg;
820 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
821 	if (!ret)
822 		return -EFAULT;
823 
824 	for (i = start; i < dev->nb_in_sg + start; i++) {
825 		dev->hw_link[i]->len = sg->length;
826 		dev->hw_link[i]->p = sg->dma_address;
827 		if (i == (dev->nb_in_sg + start - 1)) {
828 			dev->hw_link[i]->next = 0;
829 		} else {
830 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
831 			sg = sg_next(sg);
832 		}
833 	}
834 
835 	return i;
836 }
837 
838 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
839 						struct sahara_sha_reqctx *rctx,
840 						struct ahash_request *req,
841 						int index)
842 {
843 	unsigned int result_len;
844 	int i = index;
845 
846 	if (rctx->first)
847 		/* Create initial descriptor: #8 */
848 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
849 	else
850 		/* Create hash descriptor: #10. Must follow #6. */
851 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
852 
853 	dev->hw_desc[index]->len1 = rctx->total;
854 	if (dev->hw_desc[index]->len1 == 0) {
855 		/* if len1 is 0, p1 must be 0, too */
856 		dev->hw_desc[index]->p1 = 0;
857 		rctx->sg_in_idx = 0;
858 	} else {
859 		/* Create input links */
860 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
861 		i = sahara_sha_hw_links_create(dev, rctx, index);
862 
863 		rctx->sg_in_idx = index;
864 		if (i < 0)
865 			return i;
866 	}
867 
868 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
869 
870 	/* Save the context for the next operation */
871 	result_len = rctx->context_size;
872 	dev->hw_link[i]->p = dev->context_phys_base;
873 
874 	dev->hw_link[i]->len = result_len;
875 	dev->hw_desc[index]->len2 = result_len;
876 
877 	dev->hw_link[i]->next = 0;
878 
879 	return 0;
880 }
881 
882 /*
883  * Load descriptor aka #6
884  *
885  * To load a previously saved context back to the MDHA unit
886  *
887  * p1: Saved Context
888  * p2: NULL
889  *
890  */
891 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
892 						struct sahara_sha_reqctx *rctx,
893 						struct ahash_request *req,
894 						int index)
895 {
896 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
897 
898 	dev->hw_desc[index]->len1 = rctx->context_size;
899 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
900 	dev->hw_desc[index]->len2 = 0;
901 	dev->hw_desc[index]->p2 = 0;
902 
903 	dev->hw_link[index]->len = rctx->context_size;
904 	dev->hw_link[index]->p = dev->context_phys_base;
905 	dev->hw_link[index]->next = 0;
906 
907 	return 0;
908 }
909 
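/*
 * Trim the scatterlist so that it describes no more than @nbytes of data
 * and mark the last used entry as the end of the list.
 */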
910 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
911 {
912 	if (!sg || !sg->length)
913 		return nbytes;
914 
915 	while (nbytes && sg) {
916 		if (nbytes <= sg->length) {
917 			sg->length = nbytes;
918 			sg_mark_end(sg);
919 			break;
920 		}
921 		nbytes -= sg->length;
922 		sg = sg_next(sg);
923 	}
924 
925 	return nbytes;
926 }
927 
928 static int sahara_sha_prepare_request(struct ahash_request *req)
929 {
930 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
931 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
932 	unsigned int hash_later;
933 	unsigned int block_size;
934 	unsigned int len;
935 
936 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
937 
938 	/* append bytes from previous operation */
939 	len = rctx->buf_cnt + req->nbytes;
940 
941 	/* only the last transfer can be padded in hardware */
942 	if (!rctx->last && (len < block_size)) {
943 		/* too little data, save it for the next operation */
944 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
945 					 0, req->nbytes, 0);
946 		rctx->buf_cnt += req->nbytes;
947 
948 		return 0;
949 	}
950 
951 	/* add data from previous operation first */
952 	if (rctx->buf_cnt)
953 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
954 
955 	/* data must always be a multiple of block_size */
956 	hash_later = rctx->last ? 0 : len & (block_size - 1);
957 	if (hash_later) {
958 		unsigned int offset = req->nbytes - hash_later;
959 		/* Save remaining bytes for later use */
960 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
961 					hash_later, 0);
962 	}
963 
964 	/* nbytes should now be a multiple of block_size */
965 	req->nbytes = req->nbytes - hash_later;
966 
967 	sahara_walk_and_recalc(req->src, req->nbytes);
968 
969 	/* have data from previous operation and current */
970 	if (rctx->buf_cnt && req->nbytes) {
971 		sg_init_table(rctx->in_sg_chain, 2);
972 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
973 
974 		sg_chain(rctx->in_sg_chain, 2, req->src);
975 
976 		rctx->total = req->nbytes + rctx->buf_cnt;
977 		rctx->in_sg = rctx->in_sg_chain;
978 
979 		req->src = rctx->in_sg_chain;
980 	/* only data from previous operation */
981 	} else if (rctx->buf_cnt) {
982 		if (req->src)
983 			rctx->in_sg = req->src;
984 		else
985 			rctx->in_sg = rctx->in_sg_chain;
986 		/* buf was copied into rembuf above */
987 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
988 		rctx->total = rctx->buf_cnt;
989 	/* no data from previous operation */
990 	} else {
991 		rctx->in_sg = req->src;
992 		rctx->total = req->nbytes;
993 		req->src = rctx->in_sg;
994 	}
995 
996 	/* on next call, we only have the remaining data in the buffer */
997 	rctx->buf_cnt = hash_later;
998 
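	/*
	 * A non-zero return value tells sahara_sha_process() that at least
	 * one full block is ready for the hardware; it is not treated as an
	 * error code by that caller.
	 */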
999 	return -EINPROGRESS;
1000 }
1001 
1002 static int sahara_sha_process(struct ahash_request *req)
1003 {
1004 	struct sahara_dev *dev = dev_ptr;
1005 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1006 	int ret;
1007 	unsigned long timeout;
1008 
1009 	ret = sahara_sha_prepare_request(req);
1010 	if (!ret)
1011 		return ret;
1012 
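	/*
	 * First request of a transfer: a single data descriptor initializes
	 * the MDHA and hashes the data.  Later requests chain a context-load
	 * descriptor (restoring the saved digest state) in front of the data
	 * descriptor.
	 */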
1013 	if (rctx->first) {
1014 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1015 		dev->hw_desc[0]->next = 0;
1016 		rctx->first = 0;
1017 	} else {
1018 		memcpy(dev->context_base, rctx->context, rctx->context_size);
1019 
1020 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1021 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1022 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1023 		dev->hw_desc[1]->next = 0;
1024 	}
1025 
1026 	sahara_dump_descriptors(dev);
1027 	sahara_dump_links(dev);
1028 
1029 	reinit_completion(&dev->dma_completion);
1030 
1031 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1032 
1033 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1034 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1035 	if (!timeout) {
1036 		dev_err(dev->device, "SHA timeout\n");
1037 		return -ETIMEDOUT;
1038 	}
1039 
1040 	if (rctx->sg_in_idx)
1041 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1042 			     DMA_TO_DEVICE);
1043 
1044 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1045 
1046 	if (req->result)
1047 		memcpy(req->result, rctx->context, rctx->digest_size);
1048 
1049 	return 0;
1050 }
1051 
1052 static int sahara_queue_manage(void *data)
1053 {
1054 	struct sahara_dev *dev = (struct sahara_dev *)data;
1055 	struct crypto_async_request *async_req;
1056 	struct crypto_async_request *backlog;
1057 	int ret = 0;
1058 
1059 	do {
1060 		__set_current_state(TASK_INTERRUPTIBLE);
1061 
1062 		mutex_lock(&dev->queue_mutex);
1063 		backlog = crypto_get_backlog(&dev->queue);
1064 		async_req = crypto_dequeue_request(&dev->queue);
1065 		mutex_unlock(&dev->queue_mutex);
1066 
1067 		if (backlog)
1068 			backlog->complete(backlog, -EINPROGRESS);
1069 
1070 		if (async_req) {
1071 			if (crypto_tfm_alg_type(async_req->tfm) ==
1072 			    CRYPTO_ALG_TYPE_AHASH) {
1073 				struct ahash_request *req =
1074 					ahash_request_cast(async_req);
1075 
1076 				ret = sahara_sha_process(req);
1077 			} else {
1078 				struct ablkcipher_request *req =
1079 					ablkcipher_request_cast(async_req);
1080 
1081 				ret = sahara_aes_process(req);
1082 			}
1083 
1084 			async_req->complete(async_req, ret);
1085 
1086 			continue;
1087 		}
1088 
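		/* Queue empty: sleep until an enqueue wakes this thread up. */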
1089 		schedule();
1090 	} while (!kthread_should_stop());
1091 
1092 	return 0;
1093 }
1094 
1095 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1096 {
1097 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1098 	struct sahara_dev *dev = dev_ptr;
1099 	int ret;
1100 
1101 	if (!req->nbytes && !last)
1102 		return 0;
1103 
1104 	rctx->last = last;
1105 
1106 	if (!rctx->active) {
1107 		rctx->active = 1;
1108 		rctx->first = 1;
1109 	}
1110 
1111 	mutex_lock(&dev->queue_mutex);
1112 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1113 	mutex_unlock(&dev->queue_mutex);
1114 
1115 	wake_up_process(dev->kthread);
1116 
1117 	return ret;
1118 }
1119 
1120 static int sahara_sha_init(struct ahash_request *req)
1121 {
1122 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1123 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1124 
1125 	memset(rctx, 0, sizeof(*rctx));
1126 
1127 	switch (crypto_ahash_digestsize(tfm)) {
1128 	case SHA1_DIGEST_SIZE:
1129 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1130 		rctx->digest_size = SHA1_DIGEST_SIZE;
1131 		break;
1132 	case SHA256_DIGEST_SIZE:
1133 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1134 		rctx->digest_size = SHA256_DIGEST_SIZE;
1135 		break;
1136 	default:
1137 		return -EINVAL;
1138 	}
1139 
1140 	rctx->context_size = rctx->digest_size + 4;
1141 	rctx->active = 0;
1142 
1143 	return 0;
1144 }
1145 
1146 static int sahara_sha_update(struct ahash_request *req)
1147 {
1148 	return sahara_sha_enqueue(req, 0);
1149 }
1150 
1151 static int sahara_sha_final(struct ahash_request *req)
1152 {
1153 	req->nbytes = 0;
1154 	return sahara_sha_enqueue(req, 1);
1155 }
1156 
1157 static int sahara_sha_finup(struct ahash_request *req)
1158 {
1159 	return sahara_sha_enqueue(req, 1);
1160 }
1161 
1162 static int sahara_sha_digest(struct ahash_request *req)
1163 {
1164 	sahara_sha_init(req);
1165 
1166 	return sahara_sha_finup(req);
1167 }
1168 
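/*
 * Export/import simply copy the whole request context, including any
 * buffered partial block and the saved hardware context, which is why
 * halg.statesize is sizeof(struct sahara_sha_reqctx).
 */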
1169 static int sahara_sha_export(struct ahash_request *req, void *out)
1170 {
1171 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1172 
1173 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1174 
1175 	return 0;
1176 }
1177 
1178 static int sahara_sha_import(struct ahash_request *req, const void *in)
1179 {
1180 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1181 
1182 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1183 
1184 	return 0;
1185 }
1186 
1187 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1188 {
1189 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1190 				 sizeof(struct sahara_sha_reqctx) +
1191 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1192 
1193 	return 0;
1194 }
1195 
1196 static struct crypto_alg aes_algs[] = {
1197 {
1198 	.cra_name		= "ecb(aes)",
1199 	.cra_driver_name	= "sahara-ecb-aes",
1200 	.cra_priority		= 300,
1201 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1202 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1203 	.cra_blocksize		= AES_BLOCK_SIZE,
1204 	.cra_ctxsize		= sizeof(struct sahara_ctx),
1205 	.cra_alignmask		= 0x0,
1206 	.cra_type		= &crypto_ablkcipher_type,
1207 	.cra_module		= THIS_MODULE,
1208 	.cra_init		= sahara_aes_cra_init,
1209 	.cra_exit		= sahara_aes_cra_exit,
1210 	.cra_u.ablkcipher = {
1211 		.min_keysize	= AES_MIN_KEY_SIZE,
1212 		.max_keysize	= AES_MAX_KEY_SIZE,
1213 		.setkey		= sahara_aes_setkey,
1214 		.encrypt	= sahara_aes_ecb_encrypt,
1215 		.decrypt	= sahara_aes_ecb_decrypt,
1216 	}
1217 }, {
1218 	.cra_name		= "cbc(aes)",
1219 	.cra_driver_name	= "sahara-cbc-aes",
1220 	.cra_priority		= 300,
1221 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1222 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1223 	.cra_blocksize		= AES_BLOCK_SIZE,
1224 	.cra_ctxsize		= sizeof(struct sahara_ctx),
1225 	.cra_alignmask		= 0x0,
1226 	.cra_type		= &crypto_ablkcipher_type,
1227 	.cra_module		= THIS_MODULE,
1228 	.cra_init		= sahara_aes_cra_init,
1229 	.cra_exit		= sahara_aes_cra_exit,
1230 	.cra_u.ablkcipher = {
1231 		.min_keysize	= AES_MIN_KEY_SIZE,
1232 		.max_keysize	= AES_MAX_KEY_SIZE,
1233 		.ivsize		= AES_BLOCK_SIZE,
1234 		.setkey		= sahara_aes_setkey,
1235 		.encrypt	= sahara_aes_cbc_encrypt,
1236 		.decrypt	= sahara_aes_cbc_decrypt,
1237 	}
1238 }
1239 };
1240 
1241 static struct ahash_alg sha_v3_algs[] = {
1242 {
1243 	.init		= sahara_sha_init,
1244 	.update		= sahara_sha_update,
1245 	.final		= sahara_sha_final,
1246 	.finup		= sahara_sha_finup,
1247 	.digest		= sahara_sha_digest,
1248 	.export		= sahara_sha_export,
1249 	.import		= sahara_sha_import,
1250 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1251 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1252 	.halg.base	= {
1253 		.cra_name		= "sha1",
1254 		.cra_driver_name	= "sahara-sha1",
1255 		.cra_priority		= 300,
1256 		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1257 						CRYPTO_ALG_ASYNC |
1258 						CRYPTO_ALG_NEED_FALLBACK,
1259 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1260 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1261 		.cra_alignmask		= 0,
1262 		.cra_module		= THIS_MODULE,
1263 		.cra_init		= sahara_sha_cra_init,
1264 	}
1265 },
1266 };
1267 
1268 static struct ahash_alg sha_v4_algs[] = {
1269 {
1270 	.init		= sahara_sha_init,
1271 	.update		= sahara_sha_update,
1272 	.final		= sahara_sha_final,
1273 	.finup		= sahara_sha_finup,
1274 	.digest		= sahara_sha_digest,
1275 	.export		= sahara_sha_export,
1276 	.import		= sahara_sha_import,
1277 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1278 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1279 	.halg.base	= {
1280 		.cra_name		= "sha256",
1281 		.cra_driver_name	= "sahara-sha256",
1282 		.cra_priority		= 300,
1283 		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1284 						CRYPTO_ALG_ASYNC |
1285 						CRYPTO_ALG_NEED_FALLBACK,
1286 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1287 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1288 		.cra_alignmask		= 0,
1289 		.cra_module		= THIS_MODULE,
1290 		.cra_init		= sahara_sha_cra_init,
1291 	}
1292 },
1293 };
1294 
1295 static irqreturn_t sahara_irq_handler(int irq, void *data)
1296 {
1297 	struct sahara_dev *dev = (struct sahara_dev *)data;
1298 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1299 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1300 
1301 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1302 		     SAHARA_REG_CMD);
1303 
1304 	sahara_decode_status(dev, stat);
1305 
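	/*
	 * A busy state means the descriptor chain is still running and this
	 * interrupt is not the completion we are waiting for; otherwise
	 * record success or decode the error and wake up the waiter.
	 */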
1306 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1307 		return IRQ_NONE;
1308 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1309 		dev->error = 0;
1310 	} else {
1311 		sahara_decode_error(dev, err);
1312 		dev->error = -EINVAL;
1313 	}
1314 
1315 	complete(&dev->dma_completion);
1316 
1317 	return IRQ_HANDLED;
1318 }
1319 
1320 
1321 static int sahara_register_algs(struct sahara_dev *dev)
1322 {
1323 	int err;
1324 	unsigned int i, j, k, l;
1325 
1326 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1327 		INIT_LIST_HEAD(&aes_algs[i].cra_list);
1328 		err = crypto_register_alg(&aes_algs[i]);
1329 		if (err)
1330 			goto err_aes_algs;
1331 	}
1332 
1333 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1334 		err = crypto_register_ahash(&sha_v3_algs[k]);
1335 		if (err)
1336 			goto err_sha_v3_algs;
1337 	}
1338 
1339 	if (dev->version > SAHARA_VERSION_3)
1340 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1341 			err = crypto_register_ahash(&sha_v4_algs[l]);
1342 			if (err)
1343 				goto err_sha_v4_algs;
1344 		}
1345 
1346 	return 0;
1347 
1348 err_sha_v4_algs:
1349 	for (j = 0; j < l; j++)
1350 		crypto_unregister_ahash(&sha_v4_algs[j]);
1351 
1352 err_sha_v3_algs:
1353 	for (j = 0; j < k; j++)
1354 		crypto_unregister_ahash(&sha_v3_algs[j]);
1355 
1356 err_aes_algs:
1357 	for (j = 0; j < i; j++)
1358 		crypto_unregister_alg(&aes_algs[j]);
1359 
1360 	return err;
1361 }
1362 
1363 static void sahara_unregister_algs(struct sahara_dev *dev)
1364 {
1365 	unsigned int i;
1366 
1367 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1368 		crypto_unregister_alg(&aes_algs[i]);
1369 
1370 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1371 		crypto_unregister_ahash(&sha_v3_algs[i]);
1372 
1373 	if (dev->version > SAHARA_VERSION_3)
1374 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1375 			crypto_unregister_ahash(&sha_v4_algs[i]);
1376 }
1377 
1378 static const struct platform_device_id sahara_platform_ids[] = {
1379 	{ .name = "sahara-imx27" },
1380 	{ /* sentinel */ }
1381 };
1382 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1383 
1384 static const struct of_device_id sahara_dt_ids[] = {
1385 	{ .compatible = "fsl,imx53-sahara" },
1386 	{ .compatible = "fsl,imx27-sahara" },
1387 	{ /* sentinel */ }
1388 };
1389 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1390 
1391 static int sahara_probe(struct platform_device *pdev)
1392 {
1393 	struct sahara_dev *dev;
1394 	struct resource *res;
1395 	u32 version;
1396 	int irq;
1397 	int err;
1398 	int i;
1399 
1400 	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
1401 	if (dev == NULL) {
1402 		dev_err(&pdev->dev, "unable to alloc data struct.\n");
1403 		return -ENOMEM;
1404 	}
1405 
1406 	dev->device = &pdev->dev;
1407 	platform_set_drvdata(pdev, dev);
1408 
1409 	/* Get the base address */
1410 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1411 	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1412 	if (IS_ERR(dev->regs_base))
1413 		return PTR_ERR(dev->regs_base);
1414 
1415 	/* Get the IRQ */
1416 	irq = platform_get_irq(pdev, 0);
1417 	if (irq < 0) {
1418 		dev_err(&pdev->dev, "failed to get irq resource\n");
1419 		return irq;
1420 	}
1421 
1422 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1423 			       0, dev_name(&pdev->dev), dev);
1424 	if (err) {
1425 		dev_err(&pdev->dev, "failed to request irq\n");
1426 		return err;
1427 	}
1428 
1429 	/* clocks */
1430 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1431 	if (IS_ERR(dev->clk_ipg)) {
1432 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1433 		return PTR_ERR(dev->clk_ipg);
1434 	}
1435 
1436 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1437 	if (IS_ERR(dev->clk_ahb)) {
1438 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1439 		return PTR_ERR(dev->clk_ahb);
1440 	}
1441 
1442 	/* Allocate HW descriptors */
1443 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1444 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1445 			&dev->hw_phys_desc[0], GFP_KERNEL);
1446 	if (!dev->hw_desc[0]) {
1447 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1448 		return -ENOMEM;
1449 	}
1450 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1451 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1452 				sizeof(struct sahara_hw_desc);
1453 
1454 	/* Allocate space for iv and key */
1455 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1456 				&dev->key_phys_base, GFP_KERNEL);
1457 	if (!dev->key_base) {
1458 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1459 		return -ENOMEM;
1460 	}
1461 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1462 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1463 
1464 	/* Allocate space for context: largest digest + message length field */
1465 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1466 					SHA256_DIGEST_SIZE + 4,
1467 					&dev->context_phys_base, GFP_KERNEL);
1468 	if (!dev->context_base) {
1469 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1470 		return -ENOMEM;
1471 	}
1472 
1473 	/* Allocate space for HW links */
1474 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1475 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1476 			&dev->hw_phys_link[0], GFP_KERNEL);
1477 	if (!dev->hw_link[0]) {
1478 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1479 		return -ENOMEM;
1480 	}
1481 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1482 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1483 					sizeof(struct sahara_hw_link);
1484 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1485 	}
1486 
1487 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1488 
1489 	mutex_init(&dev->queue_mutex);
1490 
1491 	dev_ptr = dev;
1492 
1493 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1494 	if (IS_ERR(dev->kthread))
1495 		return PTR_ERR(dev->kthread);
1497 
1498 	init_completion(&dev->dma_completion);
1499 
1500 	err = clk_prepare_enable(dev->clk_ipg);
1501 	if (err)
1502 		return err;
1503 	err = clk_prepare_enable(dev->clk_ahb);
1504 	if (err)
1505 		goto clk_ipg_disable;
1506 
1507 	version = sahara_read(dev, SAHARA_REG_VERSION);
1508 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1509 		if (version != SAHARA_VERSION_3)
1510 			err = -ENODEV;
1511 	} else if (of_device_is_compatible(pdev->dev.of_node,
1512 			"fsl,imx53-sahara")) {
1513 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1514 			err = -ENODEV;
1515 		version = (version >> 8) & 0xff;
1516 	}
1517 	if (err == -ENODEV) {
1518 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1519 				version);
1520 		goto err_algs;
1521 	}
1522 
1523 	dev->version = version;
1524 
1525 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1526 		     SAHARA_REG_CMD);
1527 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1528 			SAHARA_CONTROL_SET_MAXBURST(8) |
1529 			SAHARA_CONTROL_RNG_AUTORSD |
1530 			SAHARA_CONTROL_ENABLE_INT,
1531 			SAHARA_REG_CONTROL);
1532 
1533 	err = sahara_register_algs(dev);
1534 	if (err)
1535 		goto err_algs;
1536 
1537 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1538 
1539 	return 0;
1540 
1541 err_algs:
1542 	kthread_stop(dev->kthread);
1543 	dev_ptr = NULL;
1544 	clk_disable_unprepare(dev->clk_ahb);
1545 clk_ipg_disable:
1546 	clk_disable_unprepare(dev->clk_ipg);
1547 
1548 	return err;
1549 }
1550 
1551 static int sahara_remove(struct platform_device *pdev)
1552 {
1553 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1554 
1555 	kthread_stop(dev->kthread);
1556 
1557 	sahara_unregister_algs(dev);
1558 
1559 	clk_disable_unprepare(dev->clk_ipg);
1560 	clk_disable_unprepare(dev->clk_ahb);
1561 
1562 	dev_ptr = NULL;
1563 
1564 	return 0;
1565 }
1566 
1567 static struct platform_driver sahara_driver = {
1568 	.probe		= sahara_probe,
1569 	.remove		= sahara_remove,
1570 	.driver		= {
1571 		.name	= SAHARA_NAME,
1572 		.of_match_table = sahara_dt_ids,
1573 	},
1574 	.id_table = sahara_platform_ids,
1575 };
1576 
1577 module_platform_driver(sahara_driver);
1578 
1579 MODULE_LICENSE("GPL");
1580 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1581 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1582 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1583