xref: /openbmc/linux/drivers/crypto/sahara.c (revision 32628841)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN		PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME "sahara"
37 #define SAHARA_VERSION_3	3
38 #define SAHARA_VERSION_4	4
39 #define SAHARA_TIMEOUT_MS	1000
40 #define SAHARA_MAX_HW_DESC	2
41 #define SAHARA_MAX_HW_LINK	20
42 
43 #define FLAGS_MODE_MASK		0x000f
44 #define FLAGS_ENCRYPT		BIT(0)
45 #define FLAGS_CBC		BIT(1)
46 
47 #define SAHARA_HDR_BASE			0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES	0
49 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
50 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
51 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
52 #define SAHARA_HDR_FORM_DATA		(5 << 16)
53 #define SAHARA_HDR_FORM_KEY		(8 << 16)
54 #define SAHARA_HDR_LLO			(1 << 24)
55 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
56 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
57 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
61 #define SAHARA_HDR_MDHA_HASH		0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1	0
64 #define SAHARA_HDR_MDHA_ALG_MD5		1
65 #define SAHARA_HDR_MDHA_ALG_SHA256	2
66 #define SAHARA_HDR_MDHA_ALG_SHA224	3
67 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
68 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
69 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
70 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
71 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
72 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
73 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
74 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
75 
76 /* SAHARA can only process one request at a time */
77 #define SAHARA_QUEUE_LENGTH	1
78 
79 #define SAHARA_REG_VERSION	0x00
80 #define SAHARA_REG_DAR		0x04
81 #define SAHARA_REG_CONTROL	0x08
82 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
83 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
84 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
85 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
86 #define SAHARA_REG_CMD		0x0C
87 #define		SAHARA_CMD_RESET		(1 << 0)
88 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
89 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
90 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
91 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
92 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
93 #define	SAHARA_REG_STATUS	0x10
94 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
95 #define			SAHARA_STATE_IDLE	0
96 #define			SAHARA_STATE_BUSY	1
97 #define			SAHARA_STATE_ERR	2
98 #define			SAHARA_STATE_FAULT	3
99 #define			SAHARA_STATE_COMPLETE	4
100 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
101 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
102 #define		SAHARA_STATUS_ERROR		(1 << 4)
103 #define		SAHARA_STATUS_SECURE		(1 << 5)
104 #define		SAHARA_STATUS_FAIL		(1 << 6)
105 #define		SAHARA_STATUS_INIT		(1 << 7)
106 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
107 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
108 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
109 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
110 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
111 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
112 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
113 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
114 #define SAHARA_REG_ERRSTATUS	0x14
115 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
116 #define			SAHARA_ERRSOURCE_CHA	14
117 #define			SAHARA_ERRSOURCE_DMA	15
118 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
119 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
120 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
121 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
122 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
123 #define SAHARA_REG_FADDR	0x18
124 #define SAHARA_REG_CDAR		0x1C
125 #define SAHARA_REG_IDAR		0x20
126 
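/*
 * Hardware descriptor as fetched by the SAHARA DMA engine: a header word,
 * two {length, pointer} pairs and the physical address of the next
 * descriptor in the chain (0 terminates the chain).
 */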
127 struct sahara_hw_desc {
128 	u32	hdr;
129 	u32	len1;
130 	u32	p1;
131 	u32	len2;
132 	u32	p2;
133 	u32	next;
134 };
135 
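/*
 * Hardware link: one contiguous DMA segment ({length, physical address})
 * plus the physical address of the next link, 0 for the last one.
 */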
136 struct sahara_hw_link {
137 	u32	len;
138 	u32	p;
139 	u32	next;
140 };
141 
142 struct sahara_ctx {
143 	/* AES-specific context */
144 	int keylen;
145 	u8 key[AES_KEYSIZE_128];
146 	struct crypto_skcipher *fallback;
147 };
148 
149 struct sahara_aes_reqctx {
150 	unsigned long mode;
151 	u8 iv_out[AES_BLOCK_SIZE];
152 	struct skcipher_request fallback_req;	// keep at the end
153 };
154 
155 /**
156  * struct sahara_sha_reqctx - private data per request
157  * @buf: holds data for requests smaller than block_size
158  * @rembuf: used to prepare one block_size-aligned request
159  * @context: hw-specific context for request. Digest is extracted from this
160  * @mode: specifies what type of hw-descriptor needs to be built
161  * @digest_size: length of digest for this request
162  * @context_size: length of hw-context for this request.
163  *                Always digest_size + 4
164  * @buf_cnt: number of bytes saved in buf
165  * @sg_in_idx: index of the hw descriptor carrying the input links
166  * @in_sg: scatterlist for input data
167  * @in_sg_chain: scatterlists for chained input data
168  * @total: total number of bytes for transfer
169  * @last: is this the last block
170  * @first: is this the first block
171  * @active: inside a transfer
172  */
173 struct sahara_sha_reqctx {
174 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
175 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			context[SHA256_DIGEST_SIZE + 4];
177 	unsigned int		mode;
178 	unsigned int		digest_size;
179 	unsigned int		context_size;
180 	unsigned int		buf_cnt;
181 	unsigned int		sg_in_idx;
182 	struct scatterlist	*in_sg;
183 	struct scatterlist	in_sg_chain[2];
184 	size_t			total;
185 	unsigned int		last;
186 	unsigned int		first;
187 	unsigned int		active;
188 };
189 
190 struct sahara_dev {
191 	struct device		*device;
192 	unsigned int		version;
193 	void __iomem		*regs_base;
194 	struct clk		*clk_ipg;
195 	struct clk		*clk_ahb;
196 	spinlock_t		queue_spinlock;
197 	struct task_struct	*kthread;
198 	struct completion	dma_completion;
199 
200 	struct sahara_ctx	*ctx;
201 	struct crypto_queue	queue;
202 	unsigned long		flags;
203 
204 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
205 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
206 
207 	u8			*key_base;
208 	dma_addr_t		key_phys_base;
209 
210 	u8			*iv_base;
211 	dma_addr_t		iv_phys_base;
212 
213 	u8			*context_base;
214 	dma_addr_t		context_phys_base;
215 
216 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
217 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
218 
219 	size_t			total;
220 	struct scatterlist	*in_sg;
221 	int		nb_in_sg;
222 	struct scatterlist	*out_sg;
223 	int		nb_out_sg;
224 
225 	u32			error;
226 };
227 
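/* The driver supports a single SAHARA instance, kept in this global pointer. */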
228 static struct sahara_dev *dev_ptr;
229 
230 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
231 {
232 	writel(data, dev->regs_base + reg);
233 }
234 
235 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
236 {
237 	return readl(dev->regs_base + reg);
238 }
239 
240 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
241 {
242 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
243 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
244 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
245 
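	/*
	 * The base header above has an odd number of bits set.  Each mode bit
	 * added below toggles the parity bit so the total number of set bits
	 * stays odd (sahara_sha_init_hdr() applies the same rule).
	 */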
246 	if (dev->flags & FLAGS_CBC) {
247 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
248 		hdr ^= SAHARA_HDR_PARITY_BIT;
249 	}
250 
251 	if (dev->flags & FLAGS_ENCRYPT) {
252 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
253 		hdr ^= SAHARA_HDR_PARITY_BIT;
254 	}
255 
256 	return hdr;
257 }
258 
259 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
260 {
261 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
262 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
263 }
264 
265 static const char *sahara_err_src[16] = {
266 	"No error",
267 	"Header error",
268 	"Descriptor length error",
269 	"Descriptor length or pointer error",
270 	"Link length error",
271 	"Link pointer error",
272 	"Input buffer error",
273 	"Output buffer error",
274 	"Output buffer starvation",
275 	"Internal state fault",
276 	"General descriptor problem",
277 	"Reserved",
278 	"Descriptor address error",
279 	"Link address error",
280 	"CHA error",
281 	"DMA error"
282 };
283 
284 static const char *sahara_err_dmasize[4] = {
285 	"Byte transfer",
286 	"Half-word transfer",
287 	"Word transfer",
288 	"Reserved"
289 };
290 
291 static const char *sahara_err_dmasrc[8] = {
292 	"No error",
293 	"AHB bus error",
294 	"Internal IP bus error",
295 	"Parity error",
296 	"DMA crosses 256 byte boundary",
297 	"DMA is busy",
298 	"Reserved",
299 	"DMA HW error"
300 };
301 
302 static const char *sahara_cha_errsrc[12] = {
303 	"Input buffer non-empty",
304 	"Illegal address",
305 	"Illegal mode",
306 	"Illegal data size",
307 	"Illegal key size",
308 	"Write during processing",
309 	"CTX read during processing",
310 	"HW error",
311 	"Input buffer disabled/underflow",
312 	"Output buffer disabled/overflow",
313 	"DES key parity error",
314 	"Reserved"
315 };
316 
317 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
318 
319 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
320 {
321 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
322 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
323 
324 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
325 
326 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
327 
328 	if (source == SAHARA_ERRSOURCE_DMA) {
329 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
330 			dev_err(dev->device, "		* DMA read.\n");
331 		else
332 			dev_err(dev->device, "		* DMA write.\n");
333 
334 		dev_err(dev->device, "		* %s.\n",
335 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
336 		dev_err(dev->device, "		* %s.\n",
337 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
338 	} else if (source == SAHARA_ERRSOURCE_CHA) {
339 		dev_err(dev->device, "		* %s.\n",
340 			sahara_cha_errsrc[chasrc]);
341 		dev_err(dev->device, "		* %s.\n",
342 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
343 	}
344 	dev_err(dev->device, "\n");
345 }
346 
347 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
348 
349 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
350 {
351 	u8 state;
352 
353 	if (!__is_defined(DEBUG))
354 		return;
355 
356 	state = SAHARA_STATUS_GET_STATE(status);
357 
358 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
359 		__func__, status);
360 
361 	dev_dbg(dev->device, "	- State = %d:\n", state);
362 	if (state & SAHARA_STATE_COMP_FLAG)
363 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
364 
365 	dev_dbg(dev->device, "		* %s.\n",
366 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
367 
368 	if (status & SAHARA_STATUS_DAR_FULL)
369 		dev_dbg(dev->device, "	- DAR Full.\n");
370 	if (status & SAHARA_STATUS_ERROR)
371 		dev_dbg(dev->device, "	- Error.\n");
372 	if (status & SAHARA_STATUS_SECURE)
373 		dev_dbg(dev->device, "	- Secure.\n");
374 	if (status & SAHARA_STATUS_FAIL)
375 		dev_dbg(dev->device, "	- Fail.\n");
376 	if (status & SAHARA_STATUS_RNG_RESEED)
377 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
378 	if (status & SAHARA_STATUS_ACTIVE_RNG)
379 		dev_dbg(dev->device, "	- RNG Active.\n");
380 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
381 		dev_dbg(dev->device, "	- MDHA Active.\n");
382 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
383 		dev_dbg(dev->device, "	- SKHA Active.\n");
384 
385 	if (status & SAHARA_STATUS_MODE_BATCH)
386 		dev_dbg(dev->device, "	- Batch Mode.\n");
387 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
388 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
389 	else if (status & SAHARA_STATUS_MODE_DEBUG)
390 		dev_dbg(dev->device, "	- Debug Mode.\n");
391 
392 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
393 	       SAHARA_STATUS_GET_ISTATE(status));
394 
395 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
396 		sahara_read(dev, SAHARA_REG_CDAR));
397 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
398 		sahara_read(dev, SAHARA_REG_IDAR));
399 }
400 
401 static void sahara_dump_descriptors(struct sahara_dev *dev)
402 {
403 	int i;
404 
405 	if (!__is_defined(DEBUG))
406 		return;
407 
408 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
409 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
410 			i, &dev->hw_phys_desc[i]);
411 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
412 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
413 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
414 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
415 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
416 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
417 			dev->hw_desc[i]->next);
418 	}
419 	dev_dbg(dev->device, "\n");
420 }
421 
422 static void sahara_dump_links(struct sahara_dev *dev)
423 {
424 	int i;
425 
426 	if (!__is_defined(DEBUG))
427 		return;
428 
429 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
430 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
431 			i, &dev->hw_phys_link[i]);
432 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
433 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
434 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
435 			dev->hw_link[i]->next);
436 	}
437 	dev_dbg(dev->device, "\n");
438 }
439 
440 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
441 {
442 	struct sahara_ctx *ctx = dev->ctx;
443 	struct scatterlist *sg;
444 	int ret;
445 	int i, j;
446 	int idx = 0;
447 	u32 len;
448 
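	/*
	 * Two chained descriptors are used: descriptor 0 loads the key (and
	 * the IV for CBC) into the SKHA unit, descriptor 1 points the engine
	 * at the input and output data through the link lists built below.
	 */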
449 	memcpy(dev->key_base, ctx->key, ctx->keylen);
450 
451 	if (dev->flags & FLAGS_CBC) {
452 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
453 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
454 	} else {
455 		dev->hw_desc[idx]->len1 = 0;
456 		dev->hw_desc[idx]->p1 = 0;
457 	}
458 	dev->hw_desc[idx]->len2 = ctx->keylen;
459 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
460 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
461 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
462 
463 	idx++;
464 
465 
466 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
467 	if (dev->nb_in_sg < 0) {
468 		dev_err(dev->device, "Invalid number of src SG entries.\n");
469 		return dev->nb_in_sg;
470 	}
471 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
472 	if (dev->nb_out_sg < 0) {
473 		dev_err(dev->device, "Invalid number of dst SG entries.\n");
474 		return dev->nb_out_sg;
475 	}
476 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
477 		dev_err(dev->device, "not enough hw links (%d)\n",
478 			dev->nb_in_sg + dev->nb_out_sg);
479 		return -EINVAL;
480 	}
481 
482 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
483 			 DMA_TO_DEVICE);
484 	if (!ret) {
485 		dev_err(dev->device, "couldn't map in sg\n");
486 		return -EINVAL;
487 	}
488 
489 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
490 			 DMA_FROM_DEVICE);
491 	if (!ret) {
492 		dev_err(dev->device, "couldn't map out sg\n");
493 		goto unmap_in;
494 	}
495 
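	/*
	 * Each hw link describes one contiguous DMA segment; the links are
	 * chained through their physical addresses so the engine can walk
	 * the whole scatterlist on its own.
	 */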
496 	/* Create input links */
497 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
498 	sg = dev->in_sg;
499 	len = dev->total;
500 	for (i = 0; i < dev->nb_in_sg; i++) {
501 		dev->hw_link[i]->len = min(len, sg->length);
502 		dev->hw_link[i]->p = sg->dma_address;
503 		if (i == (dev->nb_in_sg - 1)) {
504 			dev->hw_link[i]->next = 0;
505 		} else {
506 			len -= min(len, sg->length);
507 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
508 			sg = sg_next(sg);
509 		}
510 	}
511 
512 	/* Create output links */
513 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
514 	sg = dev->out_sg;
515 	len = dev->total;
516 	for (j = i; j < dev->nb_out_sg + i; j++) {
517 		dev->hw_link[j]->len = min(len, sg->length);
518 		dev->hw_link[j]->p = sg->dma_address;
519 		if (j == (dev->nb_out_sg + i - 1)) {
520 			dev->hw_link[j]->next = 0;
521 		} else {
522 			len -= min(len, sg->length);
523 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
524 			sg = sg_next(sg);
525 		}
526 	}
527 
528 	/* Fill remaining fields of hw_desc[1] */
529 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
530 	dev->hw_desc[idx]->len1 = dev->total;
531 	dev->hw_desc[idx]->len2 = dev->total;
532 	dev->hw_desc[idx]->next = 0;
533 
534 	sahara_dump_descriptors(dev);
535 	sahara_dump_links(dev);
536 
537 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
538 
539 	return 0;
540 
541 unmap_in:
542 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
543 		DMA_TO_DEVICE);
544 
545 	return -EINVAL;
546 }
547 
548 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
549 {
550 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
551 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
552 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
553 
554 	/* Update IV buffer to contain the last ciphertext block */
555 	if (rctx->mode & FLAGS_ENCRYPT) {
556 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
557 				   ivsize, req->cryptlen - ivsize);
558 	} else {
559 		memcpy(req->iv, rctx->iv_out, ivsize);
560 	}
561 }
562 
563 static int sahara_aes_process(struct skcipher_request *req)
564 {
565 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
566 	struct sahara_dev *dev = dev_ptr;
567 	struct sahara_ctx *ctx;
568 	struct sahara_aes_reqctx *rctx;
569 	int ret;
570 	unsigned long timeout;
571 
572 	/* Request is ready to be dispatched by the device */
573 	dev_dbg(dev->device,
574 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
575 		req->cryptlen, req->src, req->dst);
576 
577 	/* assign new request to device */
578 	dev->total = req->cryptlen;
579 	dev->in_sg = req->src;
580 	dev->out_sg = req->dst;
581 
582 	rctx = skcipher_request_ctx(req);
583 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
584 	rctx->mode &= FLAGS_MODE_MASK;
585 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
586 
587 	if ((dev->flags & FLAGS_CBC) && req->iv) {
588 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
589 
590 		memcpy(dev->iv_base, req->iv, ivsize);
591 
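		/*
		 * For decryption, save the last ciphertext block now: it is
		 * the IV for the next request and would be lost if req->src
		 * and req->dst overlap once the result is written out.
		 */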
592 		if (!(dev->flags & FLAGS_ENCRYPT)) {
593 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
594 					   rctx->iv_out, ivsize,
595 					   req->cryptlen - ivsize);
596 		}
597 	}
598 
599 	/* assign new context to device */
600 	dev->ctx = ctx;
601 
602 	reinit_completion(&dev->dma_completion);
603 
604 	ret = sahara_hw_descriptor_create(dev);
605 	if (ret)
606 		return -EINVAL;
607 
608 	timeout = wait_for_completion_timeout(&dev->dma_completion,
609 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
610 	if (!timeout) {
611 		dev_err(dev->device, "AES timeout\n");
612 		return -ETIMEDOUT;
613 	}
614 
615 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
616 		DMA_FROM_DEVICE);
617 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
618 		DMA_TO_DEVICE);
619 
620 	if ((dev->flags & FLAGS_CBC) && req->iv)
621 		sahara_aes_cbc_update_iv(req);
622 
623 	return 0;
624 }
625 
626 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
627 			     unsigned int keylen)
628 {
629 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
630 
631 	ctx->keylen = keylen;
632 
633 	/* SAHARA only supports 128-bit keys */
634 	if (keylen == AES_KEYSIZE_128) {
635 		memcpy(ctx->key, key, keylen);
636 		return 0;
637 	}
638 
639 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
640 		return -EINVAL;
641 
642 	/*
643 	 * The requested key size is not supported by HW, do a fallback.
644 	 */
645 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
646 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
647 						 CRYPTO_TFM_REQ_MASK);
648 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
649 }
650 
651 static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
652 {
653 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
654 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
655 		crypto_skcipher_reqtfm(req));
656 
657 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
658 	skcipher_request_set_callback(&rctx->fallback_req,
659 				      req->base.flags,
660 				      req->base.complete,
661 				      req->base.data);
662 	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
663 				   req->dst, req->cryptlen, req->iv);
664 
665 	if (mode & FLAGS_ENCRYPT)
666 		return crypto_skcipher_encrypt(&rctx->fallback_req);
667 
668 	return crypto_skcipher_decrypt(&rctx->fallback_req);
669 }
670 
671 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
672 {
673 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
674 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
675 		crypto_skcipher_reqtfm(req));
676 	struct sahara_dev *dev = dev_ptr;
677 	int err = 0;
678 
679 	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
680 		return sahara_aes_fallback(req, mode);
681 
682 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
683 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
684 
685 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
686 		dev_err(dev->device,
687 			"request size is not exact amount of AES blocks\n");
688 		return -EINVAL;
689 	}
690 
691 	rctx->mode = mode;
692 
693 	spin_lock_bh(&dev->queue_spinlock);
694 	err = crypto_enqueue_request(&dev->queue, &req->base);
695 	spin_unlock_bh(&dev->queue_spinlock);
696 
697 	wake_up_process(dev->kthread);
698 
699 	return err;
700 }
701 
702 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
703 {
704 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
705 }
706 
707 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
708 {
709 	return sahara_aes_crypt(req, 0);
710 }
711 
712 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
713 {
714 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
715 }
716 
717 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
718 {
719 	return sahara_aes_crypt(req, FLAGS_CBC);
720 }
721 
722 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
723 {
724 	const char *name = crypto_tfm_alg_name(&tfm->base);
725 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
726 
727 	ctx->fallback = crypto_alloc_skcipher(name, 0,
728 					      CRYPTO_ALG_NEED_FALLBACK);
729 	if (IS_ERR(ctx->fallback)) {
730 		pr_err("Error allocating fallback algo %s\n", name);
731 		return PTR_ERR(ctx->fallback);
732 	}
733 
734 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
735 					 crypto_skcipher_reqsize(ctx->fallback));
736 
737 	return 0;
738 }
739 
740 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
741 {
742 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
743 
744 	crypto_free_skcipher(ctx->fallback);
745 }
746 
747 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
748 			      struct sahara_sha_reqctx *rctx)
749 {
750 	u32 hdr = 0;
751 
752 	hdr = rctx->mode;
753 
754 	if (rctx->first) {
755 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
756 		hdr |= SAHARA_HDR_MDHA_INIT;
757 	} else {
758 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
759 	}
760 
761 	if (rctx->last)
762 		hdr |= SAHARA_HDR_MDHA_PDATA;
763 
764 	if (hweight_long(hdr) % 2 == 0)
765 		hdr |= SAHARA_HDR_PARITY_BIT;
766 
767 	return hdr;
768 }
769 
770 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
771 				       struct sahara_sha_reqctx *rctx,
772 				       int start)
773 {
774 	struct scatterlist *sg;
775 	unsigned int i;
776 	int ret;
777 
778 	dev->in_sg = rctx->in_sg;
779 
780 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
781 	if (dev->nb_in_sg < 0) {
782 		dev_err(dev->device, "Invalid number of src SG entries.\n");
783 		return dev->nb_in_sg;
784 	}
785 	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
786 		dev_err(dev->device, "not enough hw links (%d)\n",
787 			dev->nb_in_sg);
788 		return -EINVAL;
789 	}
790 
791 	sg = dev->in_sg;
792 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
793 	if (!ret)
794 		return -EFAULT;
795 
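	/*
	 * Links are filled starting at 'start' so they follow any link
	 * already claimed by the context-load descriptor; the index of the
	 * next free link is returned to the caller.
	 */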
796 	for (i = start; i < dev->nb_in_sg + start; i++) {
797 		dev->hw_link[i]->len = sg->length;
798 		dev->hw_link[i]->p = sg->dma_address;
799 		if (i == (dev->nb_in_sg + start - 1)) {
800 			dev->hw_link[i]->next = 0;
801 		} else {
802 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
803 			sg = sg_next(sg);
804 		}
805 	}
806 
807 	return i;
808 }
809 
810 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
811 						struct sahara_sha_reqctx *rctx,
812 						struct ahash_request *req,
813 						int index)
814 {
815 	unsigned int result_len;
816 	int i = index;
817 
818 	if (rctx->first)
819 		/* Create initial descriptor: #8 */
820 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
821 	else
822 		/* Create hash descriptor: #10. Must follow #6. */
823 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
824 
825 	dev->hw_desc[index]->len1 = rctx->total;
826 	if (dev->hw_desc[index]->len1 == 0) {
827 		/* if len1 is 0, p1 must be 0, too */
828 		dev->hw_desc[index]->p1 = 0;
829 		rctx->sg_in_idx = 0;
830 	} else {
831 		/* Create input links */
832 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
833 		i = sahara_sha_hw_links_create(dev, rctx, index);
834 
835 		rctx->sg_in_idx = index;
836 		if (i < 0)
837 			return i;
838 	}
839 
840 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
841 
842 	/* Save the context for the next operation */
843 	result_len = rctx->context_size;
844 	dev->hw_link[i]->p = dev->context_phys_base;
845 
846 	dev->hw_link[i]->len = result_len;
847 	dev->hw_desc[index]->len2 = result_len;
848 
849 	dev->hw_link[i]->next = 0;
850 
851 	return 0;
852 }
853 
854 /*
855  * Load descriptor aka #6
856  *
857  * To load a previously saved context back to the MDHA unit
858  *
859  * p1: Saved Context
860  * p2: NULL
861  *
862  */
863 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
864 						struct sahara_sha_reqctx *rctx,
865 						struct ahash_request *req,
866 						int index)
867 {
868 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
869 
870 	dev->hw_desc[index]->len1 = rctx->context_size;
871 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
872 	dev->hw_desc[index]->len2 = 0;
873 	dev->hw_desc[index]->p2 = 0;
874 
875 	dev->hw_link[index]->len = rctx->context_size;
876 	dev->hw_link[index]->p = dev->context_phys_base;
877 	dev->hw_link[index]->next = 0;
878 
879 	return 0;
880 }
881 
882 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
883 {
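	/*
	 * Shrink the scatterlist so that it describes exactly nbytes and
	 * mark the new last entry as the end of the list.
	 */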
884 	if (!sg || !sg->length)
885 		return nbytes;
886 
887 	while (nbytes && sg) {
888 		if (nbytes <= sg->length) {
889 			sg->length = nbytes;
890 			sg_mark_end(sg);
891 			break;
892 		}
893 		nbytes -= sg->length;
894 		sg = sg_next(sg);
895 	}
896 
897 	return nbytes;
898 }
899 
900 static int sahara_sha_prepare_request(struct ahash_request *req)
901 {
902 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
903 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
904 	unsigned int hash_later;
905 	unsigned int block_size;
906 	unsigned int len;
907 
908 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
909 
910 	/* append bytes from previous operation */
911 	len = rctx->buf_cnt + req->nbytes;
912 
913 	/* only the last transfer can be padded in hardware */
914 	if (!rctx->last && (len < block_size)) {
915 		/* too little data, save it for the next operation */
916 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
917 					 0, req->nbytes, 0);
918 		rctx->buf_cnt += req->nbytes;
919 
920 		return 0;
921 	}
922 
923 	/* add data from previous operation first */
924 	if (rctx->buf_cnt)
925 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
926 
927 	/* data must always be a multiple of block_size */
928 	hash_later = rctx->last ? 0 : len & (block_size - 1);
929 	if (hash_later) {
930 		unsigned int offset = req->nbytes - hash_later;
931 		/* Save remaining bytes for later use */
932 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
933 					hash_later, 0);
934 	}
935 
936 	/* nbytes should now be a multiple of the block size */
937 	req->nbytes = req->nbytes - hash_later;
938 
939 	sahara_walk_and_recalc(req->src, req->nbytes);
940 
941 	/* have data from previous operation and current */
942 	if (rctx->buf_cnt && req->nbytes) {
943 		sg_init_table(rctx->in_sg_chain, 2);
944 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
945 
946 		sg_chain(rctx->in_sg_chain, 2, req->src);
947 
948 		rctx->total = req->nbytes + rctx->buf_cnt;
949 		rctx->in_sg = rctx->in_sg_chain;
950 
951 		req->src = rctx->in_sg_chain;
952 	/* only data from previous operation */
953 	} else if (rctx->buf_cnt) {
954 		if (req->src)
955 			rctx->in_sg = req->src;
956 		else
957 			rctx->in_sg = rctx->in_sg_chain;
958 		/* buf was copied into rembuf above */
959 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
960 		rctx->total = rctx->buf_cnt;
961 	/* no data from previous operation */
962 	} else {
963 		rctx->in_sg = req->src;
964 		rctx->total = req->nbytes;
965 		req->src = rctx->in_sg;
966 	}
967 
968 	/* on next call, we only have the remaining data in the buffer */
969 	rctx->buf_cnt = hash_later;
970 
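	/*
	 * A non-zero return tells sahara_sha_process() that data is staged
	 * and a hardware pass is needed; returning 0 above means everything
	 * was buffered for a later call.
	 */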
971 	return -EINPROGRESS;
972 }
973 
974 static int sahara_sha_process(struct ahash_request *req)
975 {
976 	struct sahara_dev *dev = dev_ptr;
977 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
978 	int ret;
979 	unsigned long timeout;
980 
981 	ret = sahara_sha_prepare_request(req);
982 	if (!ret)
983 		return ret;
984 
985 	if (rctx->first) {
986 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
987 		dev->hw_desc[0]->next = 0;
988 		rctx->first = 0;
989 	} else {
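		/*
		 * Continuation: descriptor 0 reloads the previously saved
		 * MDHA context, descriptor 1 hashes the new data and stores
		 * the updated context back to memory.
		 */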
990 		memcpy(dev->context_base, rctx->context, rctx->context_size);
991 
992 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
993 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
994 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
995 		dev->hw_desc[1]->next = 0;
996 	}
997 
998 	sahara_dump_descriptors(dev);
999 	sahara_dump_links(dev);
1000 
1001 	reinit_completion(&dev->dma_completion);
1002 
1003 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1004 
1005 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1006 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1007 	if (!timeout) {
1008 		dev_err(dev->device, "SHA timeout\n");
1009 		return -ETIMEDOUT;
1010 	}
1011 
1012 	if (rctx->sg_in_idx)
1013 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1014 			     DMA_TO_DEVICE);
1015 
1016 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1017 
1018 	if (req->result && rctx->last)
1019 		memcpy(req->result, rctx->context, rctx->digest_size);
1020 
1021 	return 0;
1022 }
1023 
1024 static int sahara_queue_manage(void *data)
1025 {
1026 	struct sahara_dev *dev = data;
1027 	struct crypto_async_request *async_req;
1028 	struct crypto_async_request *backlog;
1029 	int ret = 0;
1030 
1031 	do {
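		/*
		 * Mark the thread sleepy before touching the queue so a
		 * wake_up_process() issued after the dequeue below is not
		 * lost; schedule() then returns immediately.
		 */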
1032 		__set_current_state(TASK_INTERRUPTIBLE);
1033 
1034 		spin_lock_bh(&dev->queue_spinlock);
1035 		backlog = crypto_get_backlog(&dev->queue);
1036 		async_req = crypto_dequeue_request(&dev->queue);
1037 		spin_unlock_bh(&dev->queue_spinlock);
1038 
1039 		if (backlog)
1040 			crypto_request_complete(backlog, -EINPROGRESS);
1041 
1042 		if (async_req) {
1043 			if (crypto_tfm_alg_type(async_req->tfm) ==
1044 			    CRYPTO_ALG_TYPE_AHASH) {
1045 				struct ahash_request *req =
1046 					ahash_request_cast(async_req);
1047 
1048 				ret = sahara_sha_process(req);
1049 			} else {
1050 				struct skcipher_request *req =
1051 					skcipher_request_cast(async_req);
1052 
1053 				ret = sahara_aes_process(req);
1054 			}
1055 
1056 			crypto_request_complete(async_req, ret);
1057 
1058 			continue;
1059 		}
1060 
1061 		schedule();
1062 	} while (!kthread_should_stop());
1063 
1064 	return 0;
1065 }
1066 
1067 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1068 {
1069 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1070 	struct sahara_dev *dev = dev_ptr;
1071 	int ret;
1072 
1073 	if (!req->nbytes && !last)
1074 		return 0;
1075 
1076 	rctx->last = last;
1077 
1078 	if (!rctx->active) {
1079 		rctx->active = 1;
1080 		rctx->first = 1;
1081 	}
1082 
1083 	spin_lock_bh(&dev->queue_spinlock);
1084 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1085 	spin_unlock_bh(&dev->queue_spinlock);
1086 
1087 	wake_up_process(dev->kthread);
1088 
1089 	return ret;
1090 }
1091 
1092 static int sahara_sha_init(struct ahash_request *req)
1093 {
1094 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1095 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1096 
1097 	memset(rctx, 0, sizeof(*rctx));
1098 
1099 	switch (crypto_ahash_digestsize(tfm)) {
1100 	case SHA1_DIGEST_SIZE:
1101 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1102 		rctx->digest_size = SHA1_DIGEST_SIZE;
1103 		break;
1104 	case SHA256_DIGEST_SIZE:
1105 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1106 		rctx->digest_size = SHA256_DIGEST_SIZE;
1107 		break;
1108 	default:
1109 		return -EINVAL;
1110 	}
1111 
1112 	rctx->context_size = rctx->digest_size + 4;
1113 	rctx->active = 0;
1114 
1115 	return 0;
1116 }
1117 
1118 static int sahara_sha_update(struct ahash_request *req)
1119 {
1120 	return sahara_sha_enqueue(req, 0);
1121 }
1122 
1123 static int sahara_sha_final(struct ahash_request *req)
1124 {
1125 	req->nbytes = 0;
1126 	return sahara_sha_enqueue(req, 1);
1127 }
1128 
1129 static int sahara_sha_finup(struct ahash_request *req)
1130 {
1131 	return sahara_sha_enqueue(req, 1);
1132 }
1133 
1134 static int sahara_sha_digest(struct ahash_request *req)
1135 {
1136 	sahara_sha_init(req);
1137 
1138 	return sahara_sha_finup(req);
1139 }
1140 
1141 static int sahara_sha_export(struct ahash_request *req, void *out)
1142 {
1143 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1144 
1145 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1146 
1147 	return 0;
1148 }
1149 
1150 static int sahara_sha_import(struct ahash_request *req, const void *in)
1151 {
1152 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1153 
1154 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1155 
1156 	return 0;
1157 }
1158 
1159 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1160 {
1161 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1162 				 sizeof(struct sahara_sha_reqctx) +
1163 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1164 
1165 	return 0;
1166 }
1167 
1168 static struct skcipher_alg aes_algs[] = {
1169 {
1170 	.base.cra_name		= "ecb(aes)",
1171 	.base.cra_driver_name	= "sahara-ecb-aes",
1172 	.base.cra_priority	= 300,
1173 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1174 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1175 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1176 	.base.cra_alignmask	= 0x0,
1177 	.base.cra_module	= THIS_MODULE,
1178 
1179 	.init			= sahara_aes_init_tfm,
1180 	.exit			= sahara_aes_exit_tfm,
1181 	.min_keysize		= AES_MIN_KEY_SIZE,
1182 	.max_keysize		= AES_MAX_KEY_SIZE,
1183 	.setkey			= sahara_aes_setkey,
1184 	.encrypt		= sahara_aes_ecb_encrypt,
1185 	.decrypt		= sahara_aes_ecb_decrypt,
1186 }, {
1187 	.base.cra_name		= "cbc(aes)",
1188 	.base.cra_driver_name	= "sahara-cbc-aes",
1189 	.base.cra_priority	= 300,
1190 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1191 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1192 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1193 	.base.cra_alignmask	= 0x0,
1194 	.base.cra_module	= THIS_MODULE,
1195 
1196 	.init			= sahara_aes_init_tfm,
1197 	.exit			= sahara_aes_exit_tfm,
1198 	.min_keysize		= AES_MIN_KEY_SIZE,
1199 	.max_keysize		= AES_MAX_KEY_SIZE,
1200 	.ivsize			= AES_BLOCK_SIZE,
1201 	.setkey			= sahara_aes_setkey,
1202 	.encrypt		= sahara_aes_cbc_encrypt,
1203 	.decrypt		= sahara_aes_cbc_decrypt,
1204 }
1205 };
1206 
1207 static struct ahash_alg sha_v3_algs[] = {
1208 {
1209 	.init		= sahara_sha_init,
1210 	.update		= sahara_sha_update,
1211 	.final		= sahara_sha_final,
1212 	.finup		= sahara_sha_finup,
1213 	.digest		= sahara_sha_digest,
1214 	.export		= sahara_sha_export,
1215 	.import		= sahara_sha_import,
1216 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1217 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1218 	.halg.base	= {
1219 		.cra_name		= "sha1",
1220 		.cra_driver_name	= "sahara-sha1",
1221 		.cra_priority		= 300,
1222 		.cra_flags		= CRYPTO_ALG_ASYNC |
1223 						CRYPTO_ALG_NEED_FALLBACK,
1224 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1225 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1226 		.cra_alignmask		= 0,
1227 		.cra_module		= THIS_MODULE,
1228 		.cra_init		= sahara_sha_cra_init,
1229 	}
1230 },
1231 };
1232 
1233 static struct ahash_alg sha_v4_algs[] = {
1234 {
1235 	.init		= sahara_sha_init,
1236 	.update		= sahara_sha_update,
1237 	.final		= sahara_sha_final,
1238 	.finup		= sahara_sha_finup,
1239 	.digest		= sahara_sha_digest,
1240 	.export		= sahara_sha_export,
1241 	.import		= sahara_sha_import,
1242 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1243 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1244 	.halg.base	= {
1245 		.cra_name		= "sha256",
1246 		.cra_driver_name	= "sahara-sha256",
1247 		.cra_priority		= 300,
1248 		.cra_flags		= CRYPTO_ALG_ASYNC |
1249 						CRYPTO_ALG_NEED_FALLBACK,
1250 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1251 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1252 		.cra_alignmask		= 0,
1253 		.cra_module		= THIS_MODULE,
1254 		.cra_init		= sahara_sha_cra_init,
1255 	}
1256 },
1257 };
1258 
1259 static irqreturn_t sahara_irq_handler(int irq, void *data)
1260 {
1261 	struct sahara_dev *dev = data;
1262 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1263 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1264 
1265 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1266 		     SAHARA_REG_CMD);
1267 
1268 	sahara_decode_status(dev, stat);
1269 
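	/*
	 * BUSY means the descriptor chain is still running and the interrupt
	 * was not meant for us; COMPLETE finishes the request, anything else
	 * is reported as an error.
	 */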
1270 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1271 		return IRQ_NONE;
1272 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1273 		dev->error = 0;
1274 	} else {
1275 		sahara_decode_error(dev, err);
1276 		dev->error = -EINVAL;
1277 	}
1278 
1279 	complete(&dev->dma_completion);
1280 
1281 	return IRQ_HANDLED;
1282 }
1283 
1284 
1285 static int sahara_register_algs(struct sahara_dev *dev)
1286 {
1287 	int err;
1288 	unsigned int i, j, k, l;
1289 
1290 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1291 		err = crypto_register_skcipher(&aes_algs[i]);
1292 		if (err)
1293 			goto err_aes_algs;
1294 	}
1295 
1296 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1297 		err = crypto_register_ahash(&sha_v3_algs[k]);
1298 		if (err)
1299 			goto err_sha_v3_algs;
1300 	}
1301 
1302 	if (dev->version > SAHARA_VERSION_3)
1303 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1304 			err = crypto_register_ahash(&sha_v4_algs[l]);
1305 			if (err)
1306 				goto err_sha_v4_algs;
1307 		}
1308 
1309 	return 0;
1310 
1311 err_sha_v4_algs:
1312 	for (j = 0; j < l; j++)
1313 		crypto_unregister_ahash(&sha_v4_algs[j]);
1314 
1315 err_sha_v3_algs:
1316 	for (j = 0; j < k; j++)
1317 		crypto_unregister_ahash(&sha_v3_algs[j]);
1318 
1319 err_aes_algs:
1320 	for (j = 0; j < i; j++)
1321 		crypto_unregister_skcipher(&aes_algs[j]);
1322 
1323 	return err;
1324 }
1325 
1326 static void sahara_unregister_algs(struct sahara_dev *dev)
1327 {
1328 	unsigned int i;
1329 
1330 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1331 		crypto_unregister_skcipher(&aes_algs[i]);
1332 
1333 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1334 		crypto_unregister_ahash(&sha_v3_algs[i]);
1335 
1336 	if (dev->version > SAHARA_VERSION_3)
1337 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1338 			crypto_unregister_ahash(&sha_v4_algs[i]);
1339 }
1340 
1341 static const struct of_device_id sahara_dt_ids[] = {
1342 	{ .compatible = "fsl,imx53-sahara" },
1343 	{ .compatible = "fsl,imx27-sahara" },
1344 	{ /* sentinel */ }
1345 };
1346 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1347 
1348 static int sahara_probe(struct platform_device *pdev)
1349 {
1350 	struct sahara_dev *dev;
1351 	u32 version;
1352 	int irq;
1353 	int err;
1354 	int i;
1355 
1356 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1357 	if (!dev)
1358 		return -ENOMEM;
1359 
1360 	dev->device = &pdev->dev;
1361 	platform_set_drvdata(pdev, dev);
1362 
1363 	/* Get the base address */
1364 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1365 	if (IS_ERR(dev->regs_base))
1366 		return PTR_ERR(dev->regs_base);
1367 
1368 	/* Get the IRQ */
1369 	irq = platform_get_irq(pdev, 0);
1370 	if (irq < 0)
1371 		return irq;
1372 
1373 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1374 			       0, dev_name(&pdev->dev), dev);
1375 	if (err) {
1376 		dev_err(&pdev->dev, "failed to request irq\n");
1377 		return err;
1378 	}
1379 
1380 	/* clocks */
1381 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1382 	if (IS_ERR(dev->clk_ipg)) {
1383 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1384 		return PTR_ERR(dev->clk_ipg);
1385 	}
1386 
1387 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1388 	if (IS_ERR(dev->clk_ahb)) {
1389 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1390 		return PTR_ERR(dev->clk_ahb);
1391 	}
1392 
1393 	/* Allocate HW descriptors */
1394 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1395 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1396 			&dev->hw_phys_desc[0], GFP_KERNEL);
1397 	if (!dev->hw_desc[0]) {
1398 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1399 		return -ENOMEM;
1400 	}
1401 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1402 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1403 				sizeof(struct sahara_hw_desc);
1404 
1405 	/* Allocate space for iv and key */
1406 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1407 				&dev->key_phys_base, GFP_KERNEL);
1408 	if (!dev->key_base) {
1409 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1410 		return -ENOMEM;
1411 	}
1412 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1413 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1414 
1415 	/* Allocate space for context: largest digest + message length field */
1416 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1417 					SHA256_DIGEST_SIZE + 4,
1418 					&dev->context_phys_base, GFP_KERNEL);
1419 	if (!dev->context_base) {
1420 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1421 		return -ENOMEM;
1422 	}
1423 
1424 	/* Allocate space for HW links */
1425 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1426 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1427 			&dev->hw_phys_link[0], GFP_KERNEL);
1428 	if (!dev->hw_link[0]) {
1429 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1430 		return -ENOMEM;
1431 	}
1432 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1433 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1434 					sizeof(struct sahara_hw_link);
1435 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1436 	}
1437 
1438 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1439 
1440 	spin_lock_init(&dev->queue_spinlock);
1441 
1442 	dev_ptr = dev;
1443 
1444 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1445 	if (IS_ERR(dev->kthread))
1446 		return PTR_ERR(dev->kthread);
1448 
1449 	init_completion(&dev->dma_completion);
1450 
1451 	err = clk_prepare_enable(dev->clk_ipg);
1452 	if (err)
1453 		return err;
1454 	err = clk_prepare_enable(dev->clk_ahb);
1455 	if (err)
1456 		goto clk_ipg_disable;
1457 
1458 	version = sahara_read(dev, SAHARA_REG_VERSION);
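	/*
	 * On i.MX27 (SAHARA v3) the register reads the version directly;
	 * on i.MX53 (SAHARA v4) the version sits in bits 15:8.
	 */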
1459 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1460 		if (version != SAHARA_VERSION_3)
1461 			err = -ENODEV;
1462 	} else if (of_device_is_compatible(pdev->dev.of_node,
1463 			"fsl,imx53-sahara")) {
1464 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1465 			err = -ENODEV;
1466 		version = (version >> 8) & 0xff;
1467 	}
1468 	if (err == -ENODEV) {
1469 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1470 				version);
1471 		goto err_algs;
1472 	}
1473 
1474 	dev->version = version;
1475 
1476 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1477 		     SAHARA_REG_CMD);
1478 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1479 			SAHARA_CONTROL_SET_MAXBURST(8) |
1480 			SAHARA_CONTROL_RNG_AUTORSD |
1481 			SAHARA_CONTROL_ENABLE_INT,
1482 			SAHARA_REG_CONTROL);
1483 
1484 	err = sahara_register_algs(dev);
1485 	if (err)
1486 		goto err_algs;
1487 
1488 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1489 
1490 	return 0;
1491 
1492 err_algs:
1493 	kthread_stop(dev->kthread);
1494 	dev_ptr = NULL;
1495 	clk_disable_unprepare(dev->clk_ahb);
1496 clk_ipg_disable:
1497 	clk_disable_unprepare(dev->clk_ipg);
1498 
1499 	return err;
1500 }
1501 
1502 static int sahara_remove(struct platform_device *pdev)
1503 {
1504 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1505 
1506 	kthread_stop(dev->kthread);
1507 
1508 	sahara_unregister_algs(dev);
1509 
1510 	clk_disable_unprepare(dev->clk_ipg);
1511 	clk_disable_unprepare(dev->clk_ahb);
1512 
1513 	dev_ptr = NULL;
1514 
1515 	return 0;
1516 }
1517 
1518 static struct platform_driver sahara_driver = {
1519 	.probe		= sahara_probe,
1520 	.remove		= sahara_remove,
1521 	.driver		= {
1522 		.name	= SAHARA_NAME,
1523 		.of_match_table = sahara_dt_ids,
1524 	},
1525 };
1526 
1527 module_platform_driver(sahara_driver);
1528 
1529 MODULE_LICENSE("GPL");
1530 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1531 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1532 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1533