xref: /openbmc/linux/drivers/crypto/sahara.c (revision ffcdf473)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/of_device.h>
31 #include <linux/platform_device.h>
32 #include <linux/spinlock.h>
33 
34 #define SHA_BUFFER_LEN		PAGE_SIZE
35 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
36 
37 #define SAHARA_NAME "sahara"
38 #define SAHARA_VERSION_3	3
39 #define SAHARA_VERSION_4	4
40 #define SAHARA_TIMEOUT_MS	1000
41 #define SAHARA_MAX_HW_DESC	2
42 #define SAHARA_MAX_HW_LINK	20
43 
44 #define FLAGS_MODE_MASK		0x000f
45 #define FLAGS_ENCRYPT		BIT(0)
46 #define FLAGS_CBC		BIT(1)
47 #define FLAGS_NEW_KEY		BIT(3)
48 
49 #define SAHARA_HDR_BASE			0x00800000
50 #define SAHARA_HDR_SKHA_ALG_AES	0
51 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
52 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
53 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
54 #define SAHARA_HDR_FORM_DATA		(5 << 16)
55 #define SAHARA_HDR_FORM_KEY		(8 << 16)
56 #define SAHARA_HDR_LLO			(1 << 24)
57 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
58 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
59 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
60 
61 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
62 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
63 #define SAHARA_HDR_MDHA_HASH		0xA0850000
64 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
65 #define SAHARA_HDR_MDHA_ALG_SHA1	0
66 #define SAHARA_HDR_MDHA_ALG_MD5		1
67 #define SAHARA_HDR_MDHA_ALG_SHA256	2
68 #define SAHARA_HDR_MDHA_ALG_SHA224	3
69 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
70 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
71 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
72 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
73 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
74 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
75 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
76 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
77 
78 /* SAHARA can only process one request at a time */
79 #define SAHARA_QUEUE_LENGTH	1
80 
81 #define SAHARA_REG_VERSION	0x00
82 #define SAHARA_REG_DAR		0x04
83 #define SAHARA_REG_CONTROL	0x08
84 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
85 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
86 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
87 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
88 #define SAHARA_REG_CMD		0x0C
89 #define		SAHARA_CMD_RESET		(1 << 0)
90 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
91 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
92 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
93 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
94 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
95 #define	SAHARA_REG_STATUS	0x10
96 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
97 #define			SAHARA_STATE_IDLE	0
98 #define			SAHARA_STATE_BUSY	1
99 #define			SAHARA_STATE_ERR	2
100 #define			SAHARA_STATE_FAULT	3
101 #define			SAHARA_STATE_COMPLETE	4
102 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
103 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
104 #define		SAHARA_STATUS_ERROR		(1 << 4)
105 #define		SAHARA_STATUS_SECURE		(1 << 5)
106 #define		SAHARA_STATUS_FAIL		(1 << 6)
107 #define		SAHARA_STATUS_INIT		(1 << 7)
108 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
109 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
110 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
111 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
112 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
113 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
114 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
115 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
116 #define SAHARA_REG_ERRSTATUS	0x14
117 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
118 #define			SAHARA_ERRSOURCE_CHA	14
119 #define			SAHARA_ERRSOURCE_DMA	15
120 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
121 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
122 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
123 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
124 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
125 #define SAHARA_REG_FADDR	0x18
126 #define SAHARA_REG_CDAR		0x1C
127 #define SAHARA_REG_IDAR		0x20
128 
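/*
 * Layout of the DMA descriptors and link entries consumed by the SAHARA
 * engine: each descriptor carries two pointer/length pairs (p1/len1 and
 * p2/len2) plus the physical address of the next descriptor, while link
 * entries form the scatter/gather chains those pointers may reference.
 */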
129 struct sahara_hw_desc {
130 	u32	hdr;
131 	u32	len1;
132 	u32	p1;
133 	u32	len2;
134 	u32	p2;
135 	u32	next;
136 };
137 
138 struct sahara_hw_link {
139 	u32	len;
140 	u32	p;
141 	u32	next;
142 };
143 
144 struct sahara_ctx {
145 	unsigned long flags;
146 
147 	/* AES-specific context */
148 	int keylen;
149 	u8 key[AES_KEYSIZE_128];
150 	struct crypto_skcipher *fallback;
151 };
152 
153 struct sahara_aes_reqctx {
154 	unsigned long mode;
155 	struct skcipher_request fallback_req;	// keep at the end
156 };
157 
158 /**
159  * struct sahara_sha_reqctx - private data per request
160  * @buf: holds data for requests smaller than block_size
161  * @rembuf: used to prepare one block_size-aligned request
162  * @context: hw-specific context for request. Digest is extracted from this
163  * @mode: specifies what type of hw-descriptor needs to be built
164  * @digest_size: length of digest for this request
165  * @context_size: length of hw-context for this request.
166  *                Always digest_size + 4
167  * @buf_cnt: number of bytes saved in buf
168  * @sg_in_idx: index of the hw descriptor using mapped input links (0 = none)
169  * @in_sg: scatterlist for input data
170  * @in_sg_chain: scatterlists for chained input data
171  * @total: total number of bytes for transfer
172  * @last: is this the last block
173  * @first: is this the first block
174  * @active: inside a transfer
175  */
176 struct sahara_sha_reqctx {
177 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
178 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
179 	u8			context[SHA256_DIGEST_SIZE + 4];
180 	unsigned int		mode;
181 	unsigned int		digest_size;
182 	unsigned int		context_size;
183 	unsigned int		buf_cnt;
184 	unsigned int		sg_in_idx;
185 	struct scatterlist	*in_sg;
186 	struct scatterlist	in_sg_chain[2];
187 	size_t			total;
188 	unsigned int		last;
189 	unsigned int		first;
190 	unsigned int		active;
191 };
192 
193 struct sahara_dev {
194 	struct device		*device;
195 	unsigned int		version;
196 	void __iomem		*regs_base;
197 	struct clk		*clk_ipg;
198 	struct clk		*clk_ahb;
199 	spinlock_t		queue_spinlock;
200 	struct task_struct	*kthread;
201 	struct completion	dma_completion;
202 
203 	struct sahara_ctx	*ctx;
204 	struct crypto_queue	queue;
205 	unsigned long		flags;
206 
207 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
208 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
209 
210 	u8			*key_base;
211 	dma_addr_t		key_phys_base;
212 
213 	u8			*iv_base;
214 	dma_addr_t		iv_phys_base;
215 
216 	u8			*context_base;
217 	dma_addr_t		context_phys_base;
218 
219 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
220 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
221 
222 	size_t			total;
223 	struct scatterlist	*in_sg;
224 	int		nb_in_sg;
225 	struct scatterlist	*out_sg;
226 	int		nb_out_sg;
227 
228 	u32			error;
229 };
230 
231 static struct sahara_dev *dev_ptr;
232 
233 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
234 {
235 	writel(data, dev->regs_base + reg);
236 }
237 
238 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
239 {
240 	return readl(dev->regs_base + reg);
241 }
242 
243 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
244 {
245 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
246 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
247 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
248 
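	/*
	 * Each mode/operation flag added below toggles the parity bit so
	 * that the header's overall parity stays constant.
	 */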
249 	if (dev->flags & FLAGS_CBC) {
250 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
251 		hdr ^= SAHARA_HDR_PARITY_BIT;
252 	}
253 
254 	if (dev->flags & FLAGS_ENCRYPT) {
255 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
256 		hdr ^= SAHARA_HDR_PARITY_BIT;
257 	}
258 
259 	return hdr;
260 }
261 
262 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
263 {
264 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
265 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
266 }
267 
268 static const char *sahara_err_src[16] = {
269 	"No error",
270 	"Header error",
271 	"Descriptor length error",
272 	"Descriptor length or pointer error",
273 	"Link length error",
274 	"Link pointer error",
275 	"Input buffer error",
276 	"Output buffer error",
277 	"Output buffer starvation",
278 	"Internal state fault",
279 	"General descriptor problem",
280 	"Reserved",
281 	"Descriptor address error",
282 	"Link address error",
283 	"CHA error",
284 	"DMA error"
285 };
286 
287 static const char *sahara_err_dmasize[4] = {
288 	"Byte transfer",
289 	"Half-word transfer",
290 	"Word transfer",
291 	"Reserved"
292 };
293 
294 static const char *sahara_err_dmasrc[8] = {
295 	"No error",
296 	"AHB bus error",
297 	"Internal IP bus error",
298 	"Parity error",
299 	"DMA crosses 256 byte boundary",
300 	"DMA is busy",
301 	"Reserved",
302 	"DMA HW error"
303 };
304 
305 static const char *sahara_cha_errsrc[12] = {
306 	"Input buffer non-empty",
307 	"Illegal address",
308 	"Illegal mode",
309 	"Illegal data size",
310 	"Illegal key size",
311 	"Write during processing",
312 	"CTX read during processing",
313 	"HW error",
314 	"Input buffer disabled/underflow",
315 	"Output buffer disabled/overflow",
316 	"DES key parity error",
317 	"Reserved"
318 };
319 
320 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
321 
322 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
323 {
324 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
325 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
326 
327 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
328 
329 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
330 
331 	if (source == SAHARA_ERRSOURCE_DMA) {
332 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
333 			dev_err(dev->device, "		* DMA read.\n");
334 		else
335 			dev_err(dev->device, "		* DMA write.\n");
336 
337 		dev_err(dev->device, "		* %s.\n",
338 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
339 		dev_err(dev->device, "		* %s.\n",
340 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
341 	} else if (source == SAHARA_ERRSOURCE_CHA) {
342 		dev_err(dev->device, "		* %s.\n",
343 			sahara_cha_errsrc[chasrc]);
344 		dev_err(dev->device, "		* %s.\n",
345 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
346 	}
347 	dev_err(dev->device, "\n");
348 }
349 
350 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
351 
352 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
353 {
354 	u8 state;
355 
356 	if (!__is_defined(DEBUG))
357 		return;
358 
359 	state = SAHARA_STATUS_GET_STATE(status);
360 
361 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
362 		__func__, status);
363 
364 	dev_dbg(dev->device, "	- State = %d:\n", state);
365 	if (state & SAHARA_STATE_COMP_FLAG)
366 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
367 
368 	dev_dbg(dev->device, "		* %s.\n",
369 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
370 
371 	if (status & SAHARA_STATUS_DAR_FULL)
372 		dev_dbg(dev->device, "	- DAR Full.\n");
373 	if (status & SAHARA_STATUS_ERROR)
374 		dev_dbg(dev->device, "	- Error.\n");
375 	if (status & SAHARA_STATUS_SECURE)
376 		dev_dbg(dev->device, "	- Secure.\n");
377 	if (status & SAHARA_STATUS_FAIL)
378 		dev_dbg(dev->device, "	- Fail.\n");
379 	if (status & SAHARA_STATUS_RNG_RESEED)
380 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
381 	if (status & SAHARA_STATUS_ACTIVE_RNG)
382 		dev_dbg(dev->device, "	- RNG Active.\n");
383 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
384 		dev_dbg(dev->device, "	- MDHA Active.\n");
385 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
386 		dev_dbg(dev->device, "	- SKHA Active.\n");
387 
388 	if (status & SAHARA_STATUS_MODE_BATCH)
389 		dev_dbg(dev->device, "	- Batch Mode.\n");
390 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
391 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
392 	else if (status & SAHARA_STATUS_MODE_DEBUG)
393 		dev_dbg(dev->device, "	- Debug Mode.\n");
394 
395 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
396 	       SAHARA_STATUS_GET_ISTATE(status));
397 
398 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
399 		sahara_read(dev, SAHARA_REG_CDAR));
400 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
401 		sahara_read(dev, SAHARA_REG_IDAR));
402 }
403 
404 static void sahara_dump_descriptors(struct sahara_dev *dev)
405 {
406 	int i;
407 
408 	if (!__is_defined(DEBUG))
409 		return;
410 
411 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
412 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
413 			i, &dev->hw_phys_desc[i]);
414 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
415 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
416 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
417 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
418 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
419 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
420 			dev->hw_desc[i]->next);
421 	}
422 	dev_dbg(dev->device, "\n");
423 }
424 
425 static void sahara_dump_links(struct sahara_dev *dev)
426 {
427 	int i;
428 
429 	if (!__is_defined(DEBUG))
430 		return;
431 
432 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
433 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
434 			i, &dev->hw_phys_link[i]);
435 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
436 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
437 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
438 			dev->hw_link[i]->next);
439 	}
440 	dev_dbg(dev->device, "\n");
441 }
442 
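/*
 * Build the descriptor chain for one AES request: an optional key/IV load
 * descriptor (only when a new key has been set) followed by a data
 * descriptor whose p1/p2 point at the input and output link chains. The
 * chain is started by writing its physical address to the DAR register.
 */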
443 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
444 {
445 	struct sahara_ctx *ctx = dev->ctx;
446 	struct scatterlist *sg;
447 	int ret;
448 	int i, j;
449 	int idx = 0;
450 
451 	/* Copy new key if necessary */
452 	if (ctx->flags & FLAGS_NEW_KEY) {
453 		memcpy(dev->key_base, ctx->key, ctx->keylen);
454 		ctx->flags &= ~FLAGS_NEW_KEY;
455 
456 		if (dev->flags & FLAGS_CBC) {
457 			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
458 			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
459 		} else {
460 			dev->hw_desc[idx]->len1 = 0;
461 			dev->hw_desc[idx]->p1 = 0;
462 		}
463 		dev->hw_desc[idx]->len2 = ctx->keylen;
464 		dev->hw_desc[idx]->p2 = dev->key_phys_base;
465 		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
466 
467 		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
468 
469 		idx++;
470 	}
471 
472 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
473 	if (dev->nb_in_sg < 0) {
474 		dev_err(dev->device, "Invalid number of src SG entries.\n");
475 		return dev->nb_in_sg;
476 	}
477 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
478 	if (dev->nb_out_sg < 0) {
479 		dev_err(dev->device, "Invalid number of dst SG entries.\n");
480 		return dev->nb_out_sg;
481 	}
482 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
483 		dev_err(dev->device, "not enough hw links (%d)\n",
484 			dev->nb_in_sg + dev->nb_out_sg);
485 		return -EINVAL;
486 	}
487 
488 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
489 			 DMA_TO_DEVICE);
490 	if (!ret) {
491 		dev_err(dev->device, "couldn't map in sg\n");
492 		return -EINVAL;
493 	}
494 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
495 			 DMA_FROM_DEVICE);
496 	if (!ret) {
497 		dev_err(dev->device, "couldn't map out sg\n");
498 		goto unmap_in;
499 	}
500 
501 	/* Create input links */
502 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
503 	sg = dev->in_sg;
504 	for (i = 0; i < dev->nb_in_sg; i++) {
505 		dev->hw_link[i]->len = sg->length;
506 		dev->hw_link[i]->p = sg->dma_address;
507 		if (i == (dev->nb_in_sg - 1)) {
508 			dev->hw_link[i]->next = 0;
509 		} else {
510 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
511 			sg = sg_next(sg);
512 		}
513 	}
514 
515 	/* Create output links */
516 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
517 	sg = dev->out_sg;
518 	for (j = i; j < dev->nb_out_sg + i; j++) {
519 		dev->hw_link[j]->len = sg->length;
520 		dev->hw_link[j]->p = sg->dma_address;
521 		if (j == (dev->nb_out_sg + i - 1)) {
522 			dev->hw_link[j]->next = 0;
523 		} else {
524 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
525 			sg = sg_next(sg);
526 		}
527 	}
528 
529 	/* Fill remaining fields of the data descriptor */
530 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
531 	dev->hw_desc[idx]->len1 = dev->total;
532 	dev->hw_desc[idx]->len2 = dev->total;
533 	dev->hw_desc[idx]->next = 0;
534 
535 	sahara_dump_descriptors(dev);
536 	sahara_dump_links(dev);
537 
538 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
539 
540 	return 0;
541 
545 unmap_in:
546 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
547 		DMA_TO_DEVICE);
548 
549 	return -EINVAL;
550 }
551 
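/*
 * Runs in the context of the queue-manager kthread: program the hardware
 * for one skcipher request and wait, with a timeout, for the completion
 * that the interrupt handler signals.
 */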
552 static int sahara_aes_process(struct skcipher_request *req)
553 {
554 	struct sahara_dev *dev = dev_ptr;
555 	struct sahara_ctx *ctx;
556 	struct sahara_aes_reqctx *rctx;
557 	int ret;
558 	unsigned long timeout;
559 
560 	/* Request is ready to be dispatched by the device */
561 	dev_dbg(dev->device,
562 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
563 		req->cryptlen, req->src, req->dst);
564 
565 	/* assign new request to device */
566 	dev->total = req->cryptlen;
567 	dev->in_sg = req->src;
568 	dev->out_sg = req->dst;
569 
570 	rctx = skcipher_request_ctx(req);
571 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
572 	rctx->mode &= FLAGS_MODE_MASK;
573 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
574 
575 	if ((dev->flags & FLAGS_CBC) && req->iv)
576 		memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);
577 
578 	/* assign new context to device */
579 	dev->ctx = ctx;
580 
581 	reinit_completion(&dev->dma_completion);
582 
583 	ret = sahara_hw_descriptor_create(dev);
584 	if (ret)
585 		return ret;
586 
587 	timeout = wait_for_completion_timeout(&dev->dma_completion,
588 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
589 	if (!timeout) {
590 		dev_err(dev->device, "AES timeout\n");
591 		return -ETIMEDOUT;
592 	}
593 
594 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
595 		DMA_FROM_DEVICE);
596 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
597 		DMA_TO_DEVICE);
598 
599 	return 0;
600 }
601 
602 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
603 			     unsigned int keylen)
604 {
605 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
606 
607 	ctx->keylen = keylen;
608 
609 	/* SAHARA only supports 128-bit keys */
610 	if (keylen == AES_KEYSIZE_128) {
611 		memcpy(ctx->key, key, keylen);
612 		ctx->flags |= FLAGS_NEW_KEY;
613 		return 0;
614 	}
615 
616 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
617 		return -EINVAL;
618 
619 	/*
620 	 * The requested key size is not supported by HW, do a fallback.
621 	 */
622 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
623 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
624 						 CRYPTO_TFM_REQ_MASK);
625 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
626 }
627 
628 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
629 {
630 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
631 	struct sahara_dev *dev = dev_ptr;
632 	int err = 0;
633 
634 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
635 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
636 
637 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
638 		dev_err(dev->device,
639 			"request size is not a multiple of the AES block size\n");
640 		return -EINVAL;
641 	}
642 
643 	rctx->mode = mode;
644 
645 	spin_lock_bh(&dev->queue_spinlock);
646 	err = crypto_enqueue_request(&dev->queue, &req->base);
647 	spin_unlock_bh(&dev->queue_spinlock);
648 
649 	wake_up_process(dev->kthread);
650 
651 	return err;
652 }
653 
654 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
655 {
656 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
657 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
658 		crypto_skcipher_reqtfm(req));
659 
660 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
661 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
662 		skcipher_request_set_callback(&rctx->fallback_req,
663 					      req->base.flags,
664 					      req->base.complete,
665 					      req->base.data);
666 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
667 					   req->dst, req->cryptlen, req->iv);
668 		return crypto_skcipher_encrypt(&rctx->fallback_req);
669 	}
670 
671 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
672 }
673 
674 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
675 {
676 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
677 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
678 		crypto_skcipher_reqtfm(req));
679 
680 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
681 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
682 		skcipher_request_set_callback(&rctx->fallback_req,
683 					      req->base.flags,
684 					      req->base.complete,
685 					      req->base.data);
686 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
687 					   req->dst, req->cryptlen, req->iv);
688 		return crypto_skcipher_decrypt(&rctx->fallback_req);
689 	}
690 
691 	return sahara_aes_crypt(req, 0);
692 }
693 
694 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
695 {
696 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
697 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
698 		crypto_skcipher_reqtfm(req));
699 
700 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
701 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
702 		skcipher_request_set_callback(&rctx->fallback_req,
703 					      req->base.flags,
704 					      req->base.complete,
705 					      req->base.data);
706 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
707 					   req->dst, req->cryptlen, req->iv);
708 		return crypto_skcipher_encrypt(&rctx->fallback_req);
709 	}
710 
711 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
712 }
713 
714 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
715 {
716 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
717 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
718 		crypto_skcipher_reqtfm(req));
719 
720 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
721 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
722 		skcipher_request_set_callback(&rctx->fallback_req,
723 					      req->base.flags,
724 					      req->base.complete,
725 					      req->base.data);
726 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
727 					   req->dst, req->cryptlen, req->iv);
728 		return crypto_skcipher_decrypt(&rctx->fallback_req);
729 	}
730 
731 	return sahara_aes_crypt(req, FLAGS_CBC);
732 }
733 
734 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
735 {
736 	const char *name = crypto_tfm_alg_name(&tfm->base);
737 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
738 
739 	ctx->fallback = crypto_alloc_skcipher(name, 0,
740 					      CRYPTO_ALG_NEED_FALLBACK);
741 	if (IS_ERR(ctx->fallback)) {
742 		pr_err("Error allocating fallback algo %s\n", name);
743 		return PTR_ERR(ctx->fallback);
744 	}
745 
746 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
747 					 crypto_skcipher_reqsize(ctx->fallback));
748 
749 	return 0;
750 }
751 
752 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
753 {
754 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
755 
756 	crypto_free_skcipher(ctx->fallback);
757 }
758 
759 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
760 			      struct sahara_sha_reqctx *rctx)
761 {
762 	u32 hdr = rctx->mode;
765 
766 	if (rctx->first) {
767 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
768 		hdr |= SAHARA_HDR_MDHA_INIT;
769 	} else {
770 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
771 	}
772 
773 	if (rctx->last)
774 		hdr |= SAHARA_HDR_MDHA_PDATA;
775 
776 	if (hweight_long(hdr) % 2 == 0)
777 		hdr |= SAHARA_HDR_PARITY_BIT;
778 
779 	return hdr;
780 }
781 
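/*
 * DMA-map the request's input scatterlist and describe it with hw_link
 * entries starting at @start. Returns the index of the first unused link
 * (the caller uses it for the context link) or a negative error code.
 */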
782 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
783 				       struct sahara_sha_reqctx *rctx,
784 				       int start)
785 {
786 	struct scatterlist *sg;
787 	unsigned int i;
788 	int ret;
789 
790 	dev->in_sg = rctx->in_sg;
791 
792 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
793 	if (dev->nb_in_sg < 0) {
794 		dev_err(dev->device, "Invalid number of src SG entries.\n");
795 		return dev->nb_in_sg;
796 	}
797 	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
798 		dev_err(dev->device, "not enough hw links (%d)\n",
799 			dev->nb_in_sg);
800 		return -EINVAL;
801 	}
802 
803 	sg = dev->in_sg;
804 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
805 	if (!ret)
806 		return -EFAULT;
807 
808 	for (i = start; i < dev->nb_in_sg + start; i++) {
809 		dev->hw_link[i]->len = sg->length;
810 		dev->hw_link[i]->p = sg->dma_address;
811 		if (i == (dev->nb_in_sg + start - 1)) {
812 			dev->hw_link[i]->next = 0;
813 		} else {
814 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
815 			sg = sg_next(sg);
816 		}
817 	}
818 
819 	return i;
820 }
821 
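/*
 * Data descriptor (#8 on the first pass, #10 on later passes): p1 points
 * at the input link chain, or is zero for an empty final update, and p2 at
 * a link covering the context buffer where the engine stores the running
 * digest.
 */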
822 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
823 						struct sahara_sha_reqctx *rctx,
824 						struct ahash_request *req,
825 						int index)
826 {
827 	unsigned int result_len;
828 	int i = index;
829 
830 	if (rctx->first)
831 		/* Create initial descriptor: #8 */
832 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
833 	else
834 		/* Create hash descriptor: #10. Must follow #6. */
835 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
836 
837 	dev->hw_desc[index]->len1 = rctx->total;
838 	if (dev->hw_desc[index]->len1 == 0) {
839 		/* if len1 is 0, p1 must be 0, too */
840 		dev->hw_desc[index]->p1 = 0;
841 		rctx->sg_in_idx = 0;
842 	} else {
843 		/* Create input links */
844 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
845 		i = sahara_sha_hw_links_create(dev, rctx, index);
846 
847 		rctx->sg_in_idx = index;
848 		if (i < 0)
849 			return i;
850 	}
851 
852 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
853 
854 	/* Save the context for the next operation */
855 	result_len = rctx->context_size;
856 	dev->hw_link[i]->p = dev->context_phys_base;
857 
858 	dev->hw_link[i]->len = result_len;
859 	dev->hw_desc[index]->len2 = result_len;
860 
861 	dev->hw_link[i]->next = 0;
862 
863 	return 0;
864 }
865 
866 /*
867  * Load descriptor aka #6
868  *
869  * To load a previously saved context back to the MDHA unit
870  *
871  * p1: Saved Context
872  * p2: NULL
873  *
874  */
875 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
876 						struct sahara_sha_reqctx *rctx,
877 						struct ahash_request *req,
878 						int index)
879 {
880 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
881 
882 	dev->hw_desc[index]->len1 = rctx->context_size;
883 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
884 	dev->hw_desc[index]->len2 = 0;
885 	dev->hw_desc[index]->p2 = 0;
886 
887 	dev->hw_link[index]->len = rctx->context_size;
888 	dev->hw_link[index]->p = dev->context_phys_base;
889 	dev->hw_link[index]->next = 0;
890 
891 	return 0;
892 }
893 
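/*
 * Trim @sg so that it describes at most @nbytes: the entry holding the
 * final byte is shortened and marked as the end of the list. The caller
 * relies only on this side effect.
 */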
894 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
895 {
896 	if (!sg || !sg->length)
897 		return nbytes;
898 
899 	while (nbytes && sg) {
900 		if (nbytes <= sg->length) {
901 			sg->length = nbytes;
902 			sg_mark_end(sg);
903 			break;
904 		}
905 		nbytes -= sg->length;
906 		sg = sg_next(sg);
907 	}
908 
909 	return nbytes;
910 }
911 
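/*
 * Stage data for one hardware pass. A short trailing fragment is kept in
 * rctx->buf for the next call; bytes carried over from the previous call
 * are prepended through rctx->rembuf and in_sg_chain. Returns 0 when the
 * data was only buffered and -EINPROGRESS when rctx->in_sg and rctx->total
 * describe a chunk ready for the engine.
 */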
912 static int sahara_sha_prepare_request(struct ahash_request *req)
913 {
914 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
915 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
916 	unsigned int hash_later;
917 	unsigned int block_size;
918 	unsigned int len;
919 
920 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
921 
922 	/* append bytes from previous operation */
923 	len = rctx->buf_cnt + req->nbytes;
924 
925 	/* only the last transfer can be padded in hardware */
926 	if (!rctx->last && (len < block_size)) {
927 		/* too little data, save it for the next operation */
928 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
929 					 0, req->nbytes, 0);
930 		rctx->buf_cnt += req->nbytes;
931 
932 		return 0;
933 	}
934 
935 	/* add data from previous operation first */
936 	if (rctx->buf_cnt)
937 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
938 
939 	/* data must always be a multiple of block_size */
940 	hash_later = rctx->last ? 0 : len & (block_size - 1);
941 	if (hash_later) {
942 		unsigned int offset = req->nbytes - hash_later;
943 		/* Save remaining bytes for later use */
944 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
945 					hash_later, 0);
946 	}
947 
948 	/* nbytes should now be a multiple of block_size */
949 	req->nbytes = req->nbytes - hash_later;
950 
951 	sahara_walk_and_recalc(req->src, req->nbytes);
952 
953 	/* have data from previous operation and current */
954 	if (rctx->buf_cnt && req->nbytes) {
955 		sg_init_table(rctx->in_sg_chain, 2);
956 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
957 
958 		sg_chain(rctx->in_sg_chain, 2, req->src);
959 
960 		rctx->total = req->nbytes + rctx->buf_cnt;
961 		rctx->in_sg = rctx->in_sg_chain;
962 
963 		req->src = rctx->in_sg_chain;
964 	/* only data from previous operation */
965 	} else if (rctx->buf_cnt) {
966 		if (req->src)
967 			rctx->in_sg = req->src;
968 		else
969 			rctx->in_sg = rctx->in_sg_chain;
970 		/* buf was copied into rembuf above */
971 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
972 		rctx->total = rctx->buf_cnt;
973 	/* no data from previous operation */
974 	} else {
975 		rctx->in_sg = req->src;
976 		rctx->total = req->nbytes;
977 		req->src = rctx->in_sg;
978 	}
979 
980 	/* on next call, we only have the remaining data in the buffer */
981 	rctx->buf_cnt = hash_later;
982 
983 	return -EINPROGRESS;
984 }
985 
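/*
 * Process one chunk of a hash request on the engine. The first pass uses a
 * single data descriptor; later passes first reload the saved MDHA context
 * (descriptor #6) and chain the data descriptor behind it. The updated
 * context is copied back afterwards and its leading bytes double as the
 * digest.
 */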
986 static int sahara_sha_process(struct ahash_request *req)
987 {
988 	struct sahara_dev *dev = dev_ptr;
989 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
990 	int ret;
991 	unsigned long timeout;
992 
993 	ret = sahara_sha_prepare_request(req);
994 	if (!ret)
995 		return ret;
996 
997 	if (rctx->first) {
998 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
999 		dev->hw_desc[0]->next = 0;
1000 		rctx->first = 0;
1001 	} else {
1002 		memcpy(dev->context_base, rctx->context, rctx->context_size);
1003 
1004 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1005 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1006 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1007 		dev->hw_desc[1]->next = 0;
1008 	}
1009 
1010 	sahara_dump_descriptors(dev);
1011 	sahara_dump_links(dev);
1012 
1013 	reinit_completion(&dev->dma_completion);
1014 
1015 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1016 
1017 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1018 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1019 	if (!timeout) {
1020 		dev_err(dev->device, "SHA timeout\n");
1021 		return -ETIMEDOUT;
1022 	}
1023 
1024 	if (rctx->sg_in_idx)
1025 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1026 			     DMA_TO_DEVICE);
1027 
1028 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1029 
1030 	if (req->result)
1031 		memcpy(req->result, rctx->context, rctx->digest_size);
1032 
1033 	return 0;
1034 }
1035 
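/*
 * Queue-manager kthread: take requests off the crypto queue one at a time
 * (the hardware cannot pipeline), dispatch them to the AES or SHA path,
 * complete them, and sleep whenever the queue is empty.
 */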
1036 static int sahara_queue_manage(void *data)
1037 {
1038 	struct sahara_dev *dev = data;
1039 	struct crypto_async_request *async_req;
1040 	struct crypto_async_request *backlog;
1041 	int ret = 0;
1042 
1043 	do {
1044 		__set_current_state(TASK_INTERRUPTIBLE);
1045 
1046 		spin_lock_bh(&dev->queue_spinlock);
1047 		backlog = crypto_get_backlog(&dev->queue);
1048 		async_req = crypto_dequeue_request(&dev->queue);
1049 		spin_unlock_bh(&dev->queue_spinlock);
1050 
1051 		if (backlog)
1052 			crypto_request_complete(backlog, -EINPROGRESS);
1053 
1054 		if (async_req) {
1055 			if (crypto_tfm_alg_type(async_req->tfm) ==
1056 			    CRYPTO_ALG_TYPE_AHASH) {
1057 				struct ahash_request *req =
1058 					ahash_request_cast(async_req);
1059 
1060 				ret = sahara_sha_process(req);
1061 			} else {
1062 				struct skcipher_request *req =
1063 					skcipher_request_cast(async_req);
1064 
1065 				ret = sahara_aes_process(req);
1066 			}
1067 
1068 			crypto_request_complete(async_req, ret);
1069 
1070 			continue;
1071 		}
1072 
1073 		schedule();
1074 	} while (!kthread_should_stop());
1075 
1076 	return 0;
1077 }
1078 
1079 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1080 {
1081 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1082 	struct sahara_dev *dev = dev_ptr;
1083 	int ret;
1084 
1085 	if (!req->nbytes && !last)
1086 		return 0;
1087 
1088 	rctx->last = last;
1089 
1090 	if (!rctx->active) {
1091 		rctx->active = 1;
1092 		rctx->first = 1;
1093 	}
1094 
1095 	spin_lock_bh(&dev->queue_spinlock);
1096 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1097 	spin_unlock_bh(&dev->queue_spinlock);
1098 
1099 	wake_up_process(dev->kthread);
1100 
1101 	return ret;
1102 }
1103 
1104 static int sahara_sha_init(struct ahash_request *req)
1105 {
1106 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1107 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1108 
1109 	memset(rctx, 0, sizeof(*rctx));
1110 
1111 	switch (crypto_ahash_digestsize(tfm)) {
1112 	case SHA1_DIGEST_SIZE:
1113 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1114 		rctx->digest_size = SHA1_DIGEST_SIZE;
1115 		break;
1116 	case SHA256_DIGEST_SIZE:
1117 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1118 		rctx->digest_size = SHA256_DIGEST_SIZE;
1119 		break;
1120 	default:
1121 		return -EINVAL;
1122 	}
1123 
1124 	rctx->context_size = rctx->digest_size + 4;
1125 	rctx->active = 0;
1126 
1127 	return 0;
1128 }
1129 
1130 static int sahara_sha_update(struct ahash_request *req)
1131 {
1132 	return sahara_sha_enqueue(req, 0);
1133 }
1134 
1135 static int sahara_sha_final(struct ahash_request *req)
1136 {
1137 	req->nbytes = 0;
1138 	return sahara_sha_enqueue(req, 1);
1139 }
1140 
1141 static int sahara_sha_finup(struct ahash_request *req)
1142 {
1143 	return sahara_sha_enqueue(req, 1);
1144 }
1145 
1146 static int sahara_sha_digest(struct ahash_request *req)
1147 {
1148 	sahara_sha_init(req);
1149 
1150 	return sahara_sha_finup(req);
1151 }
1152 
1153 static int sahara_sha_export(struct ahash_request *req, void *out)
1154 {
1155 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1156 
1157 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1158 
1159 	return 0;
1160 }
1161 
1162 static int sahara_sha_import(struct ahash_request *req, const void *in)
1163 {
1164 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1165 
1166 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1167 
1168 	return 0;
1169 }
1170 
1171 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1172 {
1173 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1174 				 sizeof(struct sahara_sha_reqctx) +
1175 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1176 
1177 	return 0;
1178 }
1179 
1180 static struct skcipher_alg aes_algs[] = {
1181 {
1182 	.base.cra_name		= "ecb(aes)",
1183 	.base.cra_driver_name	= "sahara-ecb-aes",
1184 	.base.cra_priority	= 300,
1185 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1186 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1187 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1188 	.base.cra_alignmask	= 0x0,
1189 	.base.cra_module	= THIS_MODULE,
1190 
1191 	.init			= sahara_aes_init_tfm,
1192 	.exit			= sahara_aes_exit_tfm,
1193 	.min_keysize		= AES_MIN_KEY_SIZE,
1194 	.max_keysize		= AES_MAX_KEY_SIZE,
1195 	.setkey			= sahara_aes_setkey,
1196 	.encrypt		= sahara_aes_ecb_encrypt,
1197 	.decrypt		= sahara_aes_ecb_decrypt,
1198 }, {
1199 	.base.cra_name		= "cbc(aes)",
1200 	.base.cra_driver_name	= "sahara-cbc-aes",
1201 	.base.cra_priority	= 300,
1202 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1203 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1204 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1205 	.base.cra_alignmask	= 0x0,
1206 	.base.cra_module	= THIS_MODULE,
1207 
1208 	.init			= sahara_aes_init_tfm,
1209 	.exit			= sahara_aes_exit_tfm,
1210 	.min_keysize		= AES_MIN_KEY_SIZE,
1211 	.max_keysize		= AES_MAX_KEY_SIZE,
1212 	.ivsize			= AES_BLOCK_SIZE,
1213 	.setkey			= sahara_aes_setkey,
1214 	.encrypt		= sahara_aes_cbc_encrypt,
1215 	.decrypt		= sahara_aes_cbc_decrypt,
1216 }
1217 };
1218 
1219 static struct ahash_alg sha_v3_algs[] = {
1220 {
1221 	.init		= sahara_sha_init,
1222 	.update		= sahara_sha_update,
1223 	.final		= sahara_sha_final,
1224 	.finup		= sahara_sha_finup,
1225 	.digest		= sahara_sha_digest,
1226 	.export		= sahara_sha_export,
1227 	.import		= sahara_sha_import,
1228 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1229 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1230 	.halg.base	= {
1231 		.cra_name		= "sha1",
1232 		.cra_driver_name	= "sahara-sha1",
1233 		.cra_priority		= 300,
1234 		.cra_flags		= CRYPTO_ALG_ASYNC |
1235 						CRYPTO_ALG_NEED_FALLBACK,
1236 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1237 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1238 		.cra_alignmask		= 0,
1239 		.cra_module		= THIS_MODULE,
1240 		.cra_init		= sahara_sha_cra_init,
1241 	}
1242 },
1243 };
1244 
1245 static struct ahash_alg sha_v4_algs[] = {
1246 {
1247 	.init		= sahara_sha_init,
1248 	.update		= sahara_sha_update,
1249 	.final		= sahara_sha_final,
1250 	.finup		= sahara_sha_finup,
1251 	.digest		= sahara_sha_digest,
1252 	.export		= sahara_sha_export,
1253 	.import		= sahara_sha_import,
1254 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1255 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1256 	.halg.base	= {
1257 		.cra_name		= "sha256",
1258 		.cra_driver_name	= "sahara-sha256",
1259 		.cra_priority		= 300,
1260 		.cra_flags		= CRYPTO_ALG_ASYNC |
1261 						CRYPTO_ALG_NEED_FALLBACK,
1262 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1263 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1264 		.cra_alignmask		= 0,
1265 		.cra_module		= THIS_MODULE,
1266 		.cra_init		= sahara_sha_cra_init,
1267 	}
1268 },
1269 };
1270 
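/*
 * Completion interrupt: acknowledge and clear the IRQ, record whether the
 * descriptor chain finished cleanly or with an error, and wake the waiting
 * kthread through dma_completion.
 */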
1271 static irqreturn_t sahara_irq_handler(int irq, void *data)
1272 {
1273 	struct sahara_dev *dev = data;
1274 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1275 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1276 
1277 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1278 		     SAHARA_REG_CMD);
1279 
1280 	sahara_decode_status(dev, stat);
1281 
1282 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1283 		return IRQ_NONE;
1284 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1285 		dev->error = 0;
1286 	} else {
1287 		sahara_decode_error(dev, err);
1288 		dev->error = -EINVAL;
1289 	}
1290 
1291 	complete(&dev->dma_completion);
1292 
1293 	return IRQ_HANDLED;
1294 }
1295 
1297 static int sahara_register_algs(struct sahara_dev *dev)
1298 {
1299 	int err;
1300 	unsigned int i, j, k, l;
1301 
1302 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1303 		err = crypto_register_skcipher(&aes_algs[i]);
1304 		if (err)
1305 			goto err_aes_algs;
1306 	}
1307 
1308 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1309 		err = crypto_register_ahash(&sha_v3_algs[k]);
1310 		if (err)
1311 			goto err_sha_v3_algs;
1312 	}
1313 
1314 	if (dev->version > SAHARA_VERSION_3)
1315 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1316 			err = crypto_register_ahash(&sha_v4_algs[l]);
1317 			if (err)
1318 				goto err_sha_v4_algs;
1319 		}
1320 
1321 	return 0;
1322 
1323 err_sha_v4_algs:
1324 	for (j = 0; j < l; j++)
1325 		crypto_unregister_ahash(&sha_v4_algs[j]);
1326 
1327 err_sha_v3_algs:
1328 	for (j = 0; j < k; j++)
1329 		crypto_unregister_ahash(&sha_v3_algs[j]);
1330 
1331 err_aes_algs:
1332 	for (j = 0; j < i; j++)
1333 		crypto_unregister_skcipher(&aes_algs[j]);
1334 
1335 	return err;
1336 }
1337 
1338 static void sahara_unregister_algs(struct sahara_dev *dev)
1339 {
1340 	unsigned int i;
1341 
1342 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1343 		crypto_unregister_skcipher(&aes_algs[i]);
1344 
1345 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1346 		crypto_unregister_ahash(&sha_v3_algs[i]);
1347 
1348 	if (dev->version > SAHARA_VERSION_3)
1349 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1350 			crypto_unregister_ahash(&sha_v4_algs[i]);
1351 }
1352 
1353 static const struct of_device_id sahara_dt_ids[] = {
1354 	{ .compatible = "fsl,imx53-sahara" },
1355 	{ .compatible = "fsl,imx27-sahara" },
1356 	{ /* sentinel */ }
1357 };
1358 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1359 
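/*
 * Probe: map the registers, request the IRQ and clocks, allocate the
 * coherent descriptor, link, key/IV and context buffers, start the
 * queue-manager kthread, verify the hardware version and finally register
 * the algorithms.
 */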
1360 static int sahara_probe(struct platform_device *pdev)
1361 {
1362 	struct sahara_dev *dev;
1363 	u32 version;
1364 	int irq;
1365 	int err;
1366 	int i;
1367 
1368 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1369 	if (!dev)
1370 		return -ENOMEM;
1371 
1372 	dev->device = &pdev->dev;
1373 	platform_set_drvdata(pdev, dev);
1374 
1375 	/* Get the base address */
1376 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1377 	if (IS_ERR(dev->regs_base))
1378 		return PTR_ERR(dev->regs_base);
1379 
1380 	/* Get the IRQ */
1381 	irq = platform_get_irq(pdev, 0);
1382 	if (irq < 0)
1383 		return irq;
1384 
1385 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1386 			       0, dev_name(&pdev->dev), dev);
1387 	if (err) {
1388 		dev_err(&pdev->dev, "failed to request irq\n");
1389 		return err;
1390 	}
1391 
1392 	/* clocks */
1393 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1394 	if (IS_ERR(dev->clk_ipg)) {
1395 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1396 		return PTR_ERR(dev->clk_ipg);
1397 	}
1398 
1399 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1400 	if (IS_ERR(dev->clk_ahb)) {
1401 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1402 		return PTR_ERR(dev->clk_ahb);
1403 	}
1404 
1405 	/* Allocate HW descriptors */
1406 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1407 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1408 			&dev->hw_phys_desc[0], GFP_KERNEL);
1409 	if (!dev->hw_desc[0]) {
1410 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1411 		return -ENOMEM;
1412 	}
1413 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1414 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1415 				sizeof(struct sahara_hw_desc);
1416 
1417 	/* Allocate space for iv and key */
1418 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1419 				&dev->key_phys_base, GFP_KERNEL);
1420 	if (!dev->key_base) {
1421 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1422 		return -ENOMEM;
1423 	}
1424 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1425 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1426 
1427 	/* Allocate space for context: largest digest + message length field */
1428 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1429 					SHA256_DIGEST_SIZE + 4,
1430 					&dev->context_phys_base, GFP_KERNEL);
1431 	if (!dev->context_base) {
1432 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1433 		return -ENOMEM;
1434 	}
1435 
1436 	/* Allocate space for HW links */
1437 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1438 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1439 			&dev->hw_phys_link[0], GFP_KERNEL);
1440 	if (!dev->hw_link[0]) {
1441 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1442 		return -ENOMEM;
1443 	}
1444 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1445 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1446 					sizeof(struct sahara_hw_link);
1447 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1448 	}
1449 
1450 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1451 
1452 	spin_lock_init(&dev->queue_spinlock);
1453 
1454 	dev_ptr = dev;
1455 
1456 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1457 	if (IS_ERR(dev->kthread))
1458 		return PTR_ERR(dev->kthread);
1460 
1461 	init_completion(&dev->dma_completion);
1462 
1463 	err = clk_prepare_enable(dev->clk_ipg);
1464 	if (err)
1465 		return err;
1466 	err = clk_prepare_enable(dev->clk_ahb);
1467 	if (err)
1468 		goto clk_ipg_disable;
1469 
1470 	version = sahara_read(dev, SAHARA_REG_VERSION);
1471 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1472 		if (version != SAHARA_VERSION_3)
1473 			err = -ENODEV;
1474 	} else if (of_device_is_compatible(pdev->dev.of_node,
1475 			"fsl,imx53-sahara")) {
1476 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1477 			err = -ENODEV;
1478 		version = (version >> 8) & 0xff;
1479 	}
1480 	if (err == -ENODEV) {
1481 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1482 				version);
1483 		goto err_algs;
1484 	}
1485 
1486 	dev->version = version;
1487 
1488 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1489 		     SAHARA_REG_CMD);
1490 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1491 			SAHARA_CONTROL_SET_MAXBURST(8) |
1492 			SAHARA_CONTROL_RNG_AUTORSD |
1493 			SAHARA_CONTROL_ENABLE_INT,
1494 			SAHARA_REG_CONTROL);
1495 
1496 	err = sahara_register_algs(dev);
1497 	if (err)
1498 		goto err_algs;
1499 
1500 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1501 
1502 	return 0;
1503 
1504 err_algs:
1505 	kthread_stop(dev->kthread);
1506 	dev_ptr = NULL;
1507 	clk_disable_unprepare(dev->clk_ahb);
1508 clk_ipg_disable:
1509 	clk_disable_unprepare(dev->clk_ipg);
1510 
1511 	return err;
1512 }
1513 
1514 static int sahara_remove(struct platform_device *pdev)
1515 {
1516 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1517 
1518 	kthread_stop(dev->kthread);
1519 
1520 	sahara_unregister_algs(dev);
1521 
1522 	clk_disable_unprepare(dev->clk_ipg);
1523 	clk_disable_unprepare(dev->clk_ahb);
1524 
1525 	dev_ptr = NULL;
1526 
1527 	return 0;
1528 }
1529 
1530 static struct platform_driver sahara_driver = {
1531 	.probe		= sahara_probe,
1532 	.remove		= sahara_remove,
1533 	.driver		= {
1534 		.name	= SAHARA_NAME,
1535 		.of_match_table = sahara_dt_ids,
1536 	},
1537 };
1538 
1539 module_platform_driver(sahara_driver);
1540 
1541 MODULE_LICENSE("GPL");
1542 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1543 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1544 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1545