1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN		PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME "sahara"
37 #define SAHARA_VERSION_3	3
38 #define SAHARA_VERSION_4	4
39 #define SAHARA_TIMEOUT_MS	1000
40 #define SAHARA_MAX_HW_DESC	2
41 #define SAHARA_MAX_HW_LINK	20
42 
43 #define FLAGS_MODE_MASK		0x000f
44 #define FLAGS_ENCRYPT		BIT(0)
45 #define FLAGS_CBC		BIT(1)
46 
47 #define SAHARA_HDR_BASE			0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES	0
49 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
50 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
51 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
52 #define SAHARA_HDR_FORM_DATA		(5 << 16)
53 #define SAHARA_HDR_FORM_KEY		(8 << 16)
54 #define SAHARA_HDR_LLO			(1 << 24)
55 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
56 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
57 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
61 #define SAHARA_HDR_MDHA_HASH		0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1	0
64 #define SAHARA_HDR_MDHA_ALG_MD5		1
65 #define SAHARA_HDR_MDHA_ALG_SHA256	2
66 #define SAHARA_HDR_MDHA_ALG_SHA224	3
67 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
68 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
69 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
70 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
71 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
72 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
73 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
74 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
75 
76 /* SAHARA can only process one request at a time */
77 #define SAHARA_QUEUE_LENGTH	1
78 
79 #define SAHARA_REG_VERSION	0x00
80 #define SAHARA_REG_DAR		0x04
81 #define SAHARA_REG_CONTROL	0x08
82 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
83 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
84 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
85 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
86 #define SAHARA_REG_CMD		0x0C
87 #define		SAHARA_CMD_RESET		(1 << 0)
88 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
89 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
90 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
91 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
92 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
93 #define	SAHARA_REG_STATUS	0x10
94 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
95 #define			SAHARA_STATE_IDLE	0
96 #define			SAHARA_STATE_BUSY	1
97 #define			SAHARA_STATE_ERR	2
98 #define			SAHARA_STATE_FAULT	3
99 #define			SAHARA_STATE_COMPLETE	4
100 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
101 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
102 #define		SAHARA_STATUS_ERROR		(1 << 4)
103 #define		SAHARA_STATUS_SECURE		(1 << 5)
104 #define		SAHARA_STATUS_FAIL		(1 << 6)
105 #define		SAHARA_STATUS_INIT		(1 << 7)
106 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
107 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
108 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
109 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
110 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
111 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
112 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
113 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
114 #define SAHARA_REG_ERRSTATUS	0x14
115 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
116 #define			SAHARA_ERRSOURCE_CHA	14
117 #define			SAHARA_ERRSOURCE_DMA	15
118 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
119 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
120 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
121 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
122 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
123 #define SAHARA_REG_FADDR	0x18
124 #define SAHARA_REG_CDAR		0x1C
125 #define SAHARA_REG_IDAR		0x20
126 
127 struct sahara_hw_desc {
128 	u32	hdr;
129 	u32	len1;
130 	u32	p1;
131 	u32	len2;
132 	u32	p2;
133 	u32	next;
134 };
135 
136 struct sahara_hw_link {
137 	u32	len;
138 	u32	p;
139 	u32	next;
140 };
141 
142 struct sahara_ctx {
143 	/* AES-specific context */
144 	int keylen;
145 	u8 key[AES_KEYSIZE_128];
146 	struct crypto_skcipher *fallback;
147 };
148 
149 struct sahara_aes_reqctx {
150 	unsigned long mode;
151 	u8 iv_out[AES_BLOCK_SIZE];
152 	struct skcipher_request fallback_req;	/* keep at the end */
153 };
154 
155 /*
156  * struct sahara_sha_reqctx - private data per request
157  * @buf: holds data for requests smaller than block_size
158  * @rembuf: used to prepare one block_size-aligned request
159  * @context: hw-specific context for request. Digest is extracted from this
160  * @mode: specifies what type of hw-descriptor needs to be built
161  * @digest_size: length of digest for this request
162  * @context_size: length of hw-context for this request.
163  *                Always digest_size + 4
164  * @buf_cnt: number of bytes saved in buf
165  * @sg_in_idx: non-zero when input data has been mapped into hw links
166  * @in_sg: scatterlist for input data
167  * @in_sg_chain: scatterlists for chained input data
168  * @total: total number of bytes for transfer
169  * @last: is this the last block
170  * @first: is this the first block
171  * @active: inside a transfer
172  */
173 struct sahara_sha_reqctx {
174 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
175 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			context[SHA256_DIGEST_SIZE + 4];
177 	unsigned int		mode;
178 	unsigned int		digest_size;
179 	unsigned int		context_size;
180 	unsigned int		buf_cnt;
181 	unsigned int		sg_in_idx;
182 	struct scatterlist	*in_sg;
183 	struct scatterlist	in_sg_chain[2];
184 	size_t			total;
185 	unsigned int		last;
186 	unsigned int		first;
187 	unsigned int		active;
188 };
189 
190 struct sahara_dev {
191 	struct device		*device;
192 	unsigned int		version;
193 	void __iomem		*regs_base;
194 	struct clk		*clk_ipg;
195 	struct clk		*clk_ahb;
196 	spinlock_t		queue_spinlock;
197 	struct task_struct	*kthread;
198 	struct completion	dma_completion;
199 
200 	struct sahara_ctx	*ctx;
201 	struct crypto_queue	queue;
202 	unsigned long		flags;
203 
204 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
205 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
206 
207 	u8			*key_base;
208 	dma_addr_t		key_phys_base;
209 
210 	u8			*iv_base;
211 	dma_addr_t		iv_phys_base;
212 
213 	u8			*context_base;
214 	dma_addr_t		context_phys_base;
215 
216 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
217 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
218 
219 	size_t			total;
220 	struct scatterlist	*in_sg;
221 	int		nb_in_sg;
222 	struct scatterlist	*out_sg;
223 	int		nb_out_sg;
224 
225 	u32			error;
226 };
227 
228 static struct sahara_dev *dev_ptr;
229 
230 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
231 {
232 	writel(data, dev->regs_base + reg);
233 }
234 
235 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
236 {
237 	return readl(dev->regs_base + reg);
238 }
239 
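/*
 * Build the SKHA key-setup descriptor header. The driver keeps the number
 * of set bits in every header odd (see also sahara_sha_init_hdr()): the
 * parity bit starts out set and is toggled each time another mode bit is
 * added below.
 */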
240 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
241 {
242 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
243 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
244 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
245 
246 	if (dev->flags & FLAGS_CBC) {
247 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
248 		hdr ^= SAHARA_HDR_PARITY_BIT;
249 	}
250 
251 	if (dev->flags & FLAGS_ENCRYPT) {
252 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
253 		hdr ^= SAHARA_HDR_PARITY_BIT;
254 	}
255 
256 	return hdr;
257 }
258 
259 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
260 {
261 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
262 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
263 }
264 
265 static const char *sahara_err_src[16] = {
266 	"No error",
267 	"Header error",
268 	"Descriptor length error",
269 	"Descriptor length or pointer error",
270 	"Link length error",
271 	"Link pointer error",
272 	"Input buffer error",
273 	"Output buffer error",
274 	"Output buffer starvation",
275 	"Internal state fault",
276 	"General descriptor problem",
277 	"Reserved",
278 	"Descriptor address error",
279 	"Link address error",
280 	"CHA error",
281 	"DMA error"
282 };
283 
284 static const char *sahara_err_dmasize[4] = {
285 	"Byte transfer",
286 	"Half-word transfer",
287 	"Word transfer",
288 	"Reserved"
289 };
290 
291 static const char *sahara_err_dmasrc[8] = {
292 	"No error",
293 	"AHB bus error",
294 	"Internal IP bus error",
295 	"Parity error",
296 	"DMA crosses 256 byte boundary",
297 	"DMA is busy",
298 	"Reserved",
299 	"DMA HW error"
300 };
301 
302 static const char *sahara_cha_errsrc[12] = {
303 	"Input buffer non-empty",
304 	"Illegal address",
305 	"Illegal mode",
306 	"Illegal data size",
307 	"Illegal key size",
308 	"Write during processing",
309 	"CTX read during processing",
310 	"HW error",
311 	"Input buffer disabled/underflow",
312 	"Output buffer disabled/overflow",
313 	"DES key parity error",
314 	"Reserved"
315 };
316 
317 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
318 
319 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
320 {
321 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
322 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
323 
324 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
325 
326 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
327 
328 	if (source == SAHARA_ERRSOURCE_DMA) {
329 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
330 			dev_err(dev->device, "		* DMA read.\n");
331 		else
332 			dev_err(dev->device, "		* DMA write.\n");
333 
334 		dev_err(dev->device, "		* %s.\n",
335 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
336 		dev_err(dev->device, "		* %s.\n",
337 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
338 	} else if (source == SAHARA_ERRSOURCE_CHA) {
339 		dev_err(dev->device, "		* %s.\n",
340 			sahara_cha_errsrc[chasrc]);
341 		dev_err(dev->device, "		* %s.\n",
342 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
343 	}
344 	dev_err(dev->device, "\n");
345 }
346 
347 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
348 
349 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
350 {
351 	u8 state;
352 
353 	if (!__is_defined(DEBUG))
354 		return;
355 
356 	state = SAHARA_STATUS_GET_STATE(status);
357 
358 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
359 		__func__, status);
360 
361 	dev_dbg(dev->device, "	- State = %d:\n", state);
362 	if (state & SAHARA_STATE_COMP_FLAG)
363 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
364 
365 	dev_dbg(dev->device, "		* %s.\n",
366 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
367 
368 	if (status & SAHARA_STATUS_DAR_FULL)
369 		dev_dbg(dev->device, "	- DAR Full.\n");
370 	if (status & SAHARA_STATUS_ERROR)
371 		dev_dbg(dev->device, "	- Error.\n");
372 	if (status & SAHARA_STATUS_SECURE)
373 		dev_dbg(dev->device, "	- Secure.\n");
374 	if (status & SAHARA_STATUS_FAIL)
375 		dev_dbg(dev->device, "	- Fail.\n");
376 	if (status & SAHARA_STATUS_RNG_RESEED)
377 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
378 	if (status & SAHARA_STATUS_ACTIVE_RNG)
379 		dev_dbg(dev->device, "	- RNG Active.\n");
380 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
381 		dev_dbg(dev->device, "	- MDHA Active.\n");
382 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
383 		dev_dbg(dev->device, "	- SKHA Active.\n");
384 
385 	if (status & SAHARA_STATUS_MODE_BATCH)
386 		dev_dbg(dev->device, "	- Batch Mode.\n");
387 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
388 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
389 	else if (status & SAHARA_STATUS_MODE_DEBUG)
390 		dev_dbg(dev->device, "	- Debug Mode.\n");
391 
392 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
393 	       SAHARA_STATUS_GET_ISTATE(status));
394 
395 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
396 		sahara_read(dev, SAHARA_REG_CDAR));
397 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
398 		sahara_read(dev, SAHARA_REG_IDAR));
399 }
400 
401 static void sahara_dump_descriptors(struct sahara_dev *dev)
402 {
403 	int i;
404 
405 	if (!__is_defined(DEBUG))
406 		return;
407 
408 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
409 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
410 			i, &dev->hw_phys_desc[i]);
411 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
412 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
413 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
414 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
415 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
416 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
417 			dev->hw_desc[i]->next);
418 	}
419 	dev_dbg(dev->device, "\n");
420 }
421 
422 static void sahara_dump_links(struct sahara_dev *dev)
423 {
424 	int i;
425 
426 	if (!__is_defined(DEBUG))
427 		return;
428 
429 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
430 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
431 			i, &dev->hw_phys_link[i]);
432 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
433 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
434 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
435 			dev->hw_link[i]->next);
436 	}
437 	dev_dbg(dev->device, "\n");
438 }
439 
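/*
 * Build the two-descriptor AES chain and hand it to the hardware:
 * descriptor 0 loads the key (and the IV when doing CBC), descriptor 1
 * points to the input and output link lists and carries the data length.
 * Writing the physical address of descriptor 0 to the DAR register starts
 * processing; completion is signalled through the interrupt handler.
 */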
440 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
441 {
442 	struct sahara_ctx *ctx = dev->ctx;
443 	struct scatterlist *sg;
444 	int ret;
445 	int i, j;
446 	int idx = 0;
447 
448 	memcpy(dev->key_base, ctx->key, ctx->keylen);
449 
450 	if (dev->flags & FLAGS_CBC) {
451 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
452 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
453 	} else {
454 		dev->hw_desc[idx]->len1 = 0;
455 		dev->hw_desc[idx]->p1 = 0;
456 	}
457 	dev->hw_desc[idx]->len2 = ctx->keylen;
458 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
459 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
460 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
461 
462 	idx++;
463 
465 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
466 	if (dev->nb_in_sg < 0) {
467 		dev_err(dev->device, "Invalid number of src SG.\n");
468 		return dev->nb_in_sg;
469 	}
470 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
471 	if (dev->nb_out_sg < 0) {
472 		dev_err(dev->device, "Invalid number of dst SG.\n");
473 		return dev->nb_out_sg;
474 	}
475 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
476 		dev_err(dev->device, "not enough hw links (%d)\n",
477 			dev->nb_in_sg + dev->nb_out_sg);
478 		return -EINVAL;
479 	}
480 
481 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
482 			 DMA_TO_DEVICE);
483 	if (!ret) {
484 		dev_err(dev->device, "couldn't map in sg\n");
485 		return -EINVAL;
486 	}
487 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
488 			 DMA_FROM_DEVICE);
489 	if (!ret) {
490 		dev_err(dev->device, "couldn't map out sg\n");
491 		goto unmap_in;
492 	}
493 
494 	/* Create input links */
495 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
496 	sg = dev->in_sg;
497 	for (i = 0; i < dev->nb_in_sg; i++) {
498 		dev->hw_link[i]->len = sg->length;
499 		dev->hw_link[i]->p = sg->dma_address;
500 		if (i == (dev->nb_in_sg - 1)) {
501 			dev->hw_link[i]->next = 0;
502 		} else {
503 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
504 			sg = sg_next(sg);
505 		}
506 	}
507 
508 	/* Create output links */
509 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
510 	sg = dev->out_sg;
511 	for (j = i; j < dev->nb_out_sg + i; j++) {
512 		dev->hw_link[j]->len = sg->length;
513 		dev->hw_link[j]->p = sg->dma_address;
514 		if (j == (dev->nb_out_sg + i - 1)) {
515 			dev->hw_link[j]->next = 0;
516 		} else {
517 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
518 			sg = sg_next(sg);
519 		}
520 	}
521 
522 	/* Fill remaining fields of hw_desc[1] */
523 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
524 	dev->hw_desc[idx]->len1 = dev->total;
525 	dev->hw_desc[idx]->len2 = dev->total;
526 	dev->hw_desc[idx]->next = 0;
527 
528 	sahara_dump_descriptors(dev);
529 	sahara_dump_links(dev);
530 
531 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
532 
533 	return 0;
534 
538 unmap_in:
539 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
540 		DMA_TO_DEVICE);
541 
542 	return -EINVAL;
543 }
544 
545 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
546 {
547 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
548 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
549 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
550 
551 	/* Update IV buffer to contain the last ciphertext block */
552 	if (rctx->mode & FLAGS_ENCRYPT) {
553 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
554 				   ivsize, req->cryptlen - ivsize);
555 	} else {
556 		memcpy(req->iv, rctx->iv_out, ivsize);
557 	}
558 }
559 
560 static int sahara_aes_process(struct skcipher_request *req)
561 {
562 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
563 	struct sahara_dev *dev = dev_ptr;
564 	struct sahara_ctx *ctx;
565 	struct sahara_aes_reqctx *rctx;
566 	int ret;
567 	unsigned long timeout;
568 
569 	/* Request is ready to be dispatched by the device */
570 	dev_dbg(dev->device,
571 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
572 		req->cryptlen, req->src, req->dst);
573 
574 	/* assign new request to device */
575 	dev->total = req->cryptlen;
576 	dev->in_sg = req->src;
577 	dev->out_sg = req->dst;
578 
579 	rctx = skcipher_request_ctx(req);
580 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
581 	rctx->mode &= FLAGS_MODE_MASK;
582 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
583 
584 	if ((dev->flags & FLAGS_CBC) && req->iv) {
585 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
586 
587 		memcpy(dev->iv_base, req->iv, ivsize);
588 
589 		if (!(dev->flags & FLAGS_ENCRYPT)) {
590 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
591 					   rctx->iv_out, ivsize,
592 					   req->cryptlen - ivsize);
593 		}
594 	}
595 
596 	/* assign new context to device */
597 	dev->ctx = ctx;
598 
599 	reinit_completion(&dev->dma_completion);
600 
601 	ret = sahara_hw_descriptor_create(dev);
602 	if (ret)
603 		return -EINVAL;
604 
605 	timeout = wait_for_completion_timeout(&dev->dma_completion,
606 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
607 	if (!timeout) {
608 		dev_err(dev->device, "AES timeout\n");
609 		return -ETIMEDOUT;
610 	}
611 
612 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
613 		DMA_FROM_DEVICE);
614 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
615 		DMA_TO_DEVICE);
616 
617 	if ((dev->flags & FLAGS_CBC) && req->iv)
618 		sahara_aes_cbc_update_iv(req);
619 
620 	return 0;
621 }
622 
623 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
624 			     unsigned int keylen)
625 {
626 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
627 
628 	ctx->keylen = keylen;
629 
630 	/* SAHARA only supports 128-bit keys */
631 	if (keylen == AES_KEYSIZE_128) {
632 		memcpy(ctx->key, key, keylen);
633 		return 0;
634 	}
635 
636 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
637 		return -EINVAL;
638 
639 	/*
640 	 * The requested key size is not supported by HW, do a fallback.
641 	 */
642 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
643 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
644 						 CRYPTO_TFM_REQ_MASK);
645 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
646 }
647 
648 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
649 {
650 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
651 	struct sahara_dev *dev = dev_ptr;
652 	int err = 0;
653 
654 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
655 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
656 
657 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
658 		dev_err(dev->device,
659 			"request size is not an exact number of AES blocks\n");
660 		return -EINVAL;
661 	}
662 
663 	rctx->mode = mode;
664 
665 	spin_lock_bh(&dev->queue_spinlock);
666 	err = crypto_enqueue_request(&dev->queue, &req->base);
667 	spin_unlock_bh(&dev->queue_spinlock);
668 
669 	wake_up_process(dev->kthread);
670 
671 	return err;
672 }
673 
674 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
675 {
676 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
677 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
678 		crypto_skcipher_reqtfm(req));
679 
680 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
681 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
682 		skcipher_request_set_callback(&rctx->fallback_req,
683 					      req->base.flags,
684 					      req->base.complete,
685 					      req->base.data);
686 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
687 					   req->dst, req->cryptlen, req->iv);
688 		return crypto_skcipher_encrypt(&rctx->fallback_req);
689 	}
690 
691 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
692 }
693 
694 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
695 {
696 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
697 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
698 		crypto_skcipher_reqtfm(req));
699 
700 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
701 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
702 		skcipher_request_set_callback(&rctx->fallback_req,
703 					      req->base.flags,
704 					      req->base.complete,
705 					      req->base.data);
706 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
707 					   req->dst, req->cryptlen, req->iv);
708 		return crypto_skcipher_decrypt(&rctx->fallback_req);
709 	}
710 
711 	return sahara_aes_crypt(req, 0);
712 }
713 
714 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
715 {
716 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
717 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
718 		crypto_skcipher_reqtfm(req));
719 
720 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
721 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
722 		skcipher_request_set_callback(&rctx->fallback_req,
723 					      req->base.flags,
724 					      req->base.complete,
725 					      req->base.data);
726 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
727 					   req->dst, req->cryptlen, req->iv);
728 		return crypto_skcipher_encrypt(&rctx->fallback_req);
729 	}
730 
731 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
732 }
733 
734 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
735 {
736 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
737 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
738 		crypto_skcipher_reqtfm(req));
739 
740 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
741 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
742 		skcipher_request_set_callback(&rctx->fallback_req,
743 					      req->base.flags,
744 					      req->base.complete,
745 					      req->base.data);
746 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
747 					   req->dst, req->cryptlen, req->iv);
748 		return crypto_skcipher_decrypt(&rctx->fallback_req);
749 	}
750 
751 	return sahara_aes_crypt(req, FLAGS_CBC);
752 }
753 
754 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
755 {
756 	const char *name = crypto_tfm_alg_name(&tfm->base);
757 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
758 
759 	ctx->fallback = crypto_alloc_skcipher(name, 0,
760 					      CRYPTO_ALG_NEED_FALLBACK);
761 	if (IS_ERR(ctx->fallback)) {
762 		pr_err("Error allocating fallback algo %s\n", name);
763 		return PTR_ERR(ctx->fallback);
764 	}
765 
766 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
767 					 crypto_skcipher_reqsize(ctx->fallback));
768 
769 	return 0;
770 }
771 
772 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
773 {
774 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
775 
776 	crypto_free_skcipher(ctx->fallback);
777 }
778 
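/*
 * Build the MDHA descriptor header for the current hash chunk: the first
 * chunk programs the hash mode and the INIT bit, later chunks reload the
 * previously saved context instead, and the PDATA bit is set only for the
 * last chunk. A parity bit is added when needed to keep the number of set
 * bits odd.
 */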
779 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
780 			      struct sahara_sha_reqctx *rctx)
781 {
782 	u32 hdr = 0;
783 
784 	hdr = rctx->mode;
785 
786 	if (rctx->first) {
787 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
788 		hdr |= SAHARA_HDR_MDHA_INIT;
789 	} else {
790 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
791 	}
792 
793 	if (rctx->last)
794 		hdr |= SAHARA_HDR_MDHA_PDATA;
795 
796 	if (hweight_long(hdr) % 2 == 0)
797 		hdr |= SAHARA_HDR_PARITY_BIT;
798 
799 	return hdr;
800 }
801 
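/*
 * Map the request scatterlist for DMA and describe it with hardware links
 * starting at index 'start'. Returns the index of the first unused link on
 * success (used by the caller to append the context link) or a negative
 * error code.
 */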
802 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
803 				       struct sahara_sha_reqctx *rctx,
804 				       int start)
805 {
806 	struct scatterlist *sg;
807 	unsigned int i;
808 	int ret;
809 
810 	dev->in_sg = rctx->in_sg;
811 
812 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
813 	if (dev->nb_in_sg < 0) {
814 		dev_err(dev->device, "Invalid number of src SG.\n");
815 		return dev->nb_in_sg;
816 	}
817 	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
818 		dev_err(dev->device, "not enough hw links (%d)\n",
819 			dev->nb_in_sg);
820 		return -EINVAL;
821 	}
822 
823 	sg = dev->in_sg;
824 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
825 	if (!ret)
826 		return -EFAULT;
827 
828 	for (i = start; i < dev->nb_in_sg + start; i++) {
829 		dev->hw_link[i]->len = sg->length;
830 		dev->hw_link[i]->p = sg->dma_address;
831 		if (i == (dev->nb_in_sg + start - 1)) {
832 			dev->hw_link[i]->next = 0;
833 		} else {
834 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
835 			sg = sg_next(sg);
836 		}
837 	}
838 
839 	return i;
840 }
841 
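/*
 * Hash data descriptor aka #8 (first chunk) or #10 (subsequent chunks)
 *
 * p1: input data links (NULL when there is no data to hash)
 * p2: link to the saved context; the hardware writes digest + message
 *     length back through it
 */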
842 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
843 						struct sahara_sha_reqctx *rctx,
844 						struct ahash_request *req,
845 						int index)
846 {
847 	unsigned result_len;
848 	int i = index;
849 
850 	if (rctx->first)
851 		/* Create initial descriptor: #8 */
852 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
853 	else
854 		/* Create hash descriptor: #10. Must follow #6. */
855 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
856 
857 	dev->hw_desc[index]->len1 = rctx->total;
858 	if (dev->hw_desc[index]->len1 == 0) {
859 		/* if len1 is 0, p1 must be 0, too */
860 		dev->hw_desc[index]->p1 = 0;
861 		rctx->sg_in_idx = 0;
862 	} else {
863 		/* Create input links */
864 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
865 		i = sahara_sha_hw_links_create(dev, rctx, index);
866 
867 		rctx->sg_in_idx = index;
868 		if (i < 0)
869 			return i;
870 	}
871 
872 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
873 
874 	/* Save the context for the next operation */
875 	result_len = rctx->context_size;
876 	dev->hw_link[i]->p = dev->context_phys_base;
877 
878 	dev->hw_link[i]->len = result_len;
879 	dev->hw_desc[index]->len2 = result_len;
880 
881 	dev->hw_link[i]->next = 0;
882 
883 	return 0;
884 }
885 
886 /*
887  * Load descriptor aka #6
888  *
889  * To load a previously saved context back to the MDHA unit
890  *
891  * p1: Saved Context
892  * p2: NULL
893  *
894  */
895 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
896 						struct sahara_sha_reqctx *rctx,
897 						struct ahash_request *req,
898 						int index)
899 {
900 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
901 
902 	dev->hw_desc[index]->len1 = rctx->context_size;
903 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
904 	dev->hw_desc[index]->len2 = 0;
905 	dev->hw_desc[index]->p2 = 0;
906 
907 	dev->hw_link[index]->len = rctx->context_size;
908 	dev->hw_link[index]->p = dev->context_phys_base;
909 	dev->hw_link[index]->next = 0;
910 
911 	return 0;
912 }
913 
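/*
 * Truncate the scatterlist so it covers no more than 'nbytes': the entry
 * holding the final byte is shortened and marked as the end of the list.
 */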
914 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
915 {
916 	if (!sg || !sg->length)
917 		return nbytes;
918 
919 	while (nbytes && sg) {
920 		if (nbytes <= sg->length) {
921 			sg->length = nbytes;
922 			sg_mark_end(sg);
923 			break;
924 		}
925 		nbytes -= sg->length;
926 		sg = sg_next(sg);
927 	}
928 
929 	return nbytes;
930 }
931 
932 static int sahara_sha_prepare_request(struct ahash_request *req)
933 {
934 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
935 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
936 	unsigned int hash_later;
937 	unsigned int block_size;
938 	unsigned int len;
939 
940 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
941 
942 	/* append bytes from previous operation */
943 	len = rctx->buf_cnt + req->nbytes;
944 
945 	/* only the last transfer can be padded in hardware */
946 	if (!rctx->last && (len < block_size)) {
947 		/* too little data, save it for the next operation */
948 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
949 					 0, req->nbytes, 0);
950 		rctx->buf_cnt += req->nbytes;
951 
952 		return 0;
953 	}
954 
955 	/* add data from previous operation first */
956 	if (rctx->buf_cnt)
957 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
958 
959 	/* data must always be a multiple of block_size */
960 	hash_later = rctx->last ? 0 : len & (block_size - 1);
961 	if (hash_later) {
962 		unsigned int offset = req->nbytes - hash_later;
963 		/* Save remaining bytes for later use */
964 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
965 					hash_later, 0);
966 	}
967 
968 	/* nbytes should now be multiple of blocksize */
969 	req->nbytes = req->nbytes - hash_later;
970 
971 	sahara_walk_and_recalc(req->src, req->nbytes);
972 
973 	/* have data from previous operation and current */
974 	if (rctx->buf_cnt && req->nbytes) {
975 		sg_init_table(rctx->in_sg_chain, 2);
976 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
977 
978 		sg_chain(rctx->in_sg_chain, 2, req->src);
979 
980 		rctx->total = req->nbytes + rctx->buf_cnt;
981 		rctx->in_sg = rctx->in_sg_chain;
982 
983 		req->src = rctx->in_sg_chain;
984 	/* only data from previous operation */
985 	} else if (rctx->buf_cnt) {
986 		if (req->src)
987 			rctx->in_sg = req->src;
988 		else
989 			rctx->in_sg = rctx->in_sg_chain;
990 		/* buf was copied into rembuf above */
991 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
992 		rctx->total = rctx->buf_cnt;
993 	/* no data from previous operation */
994 	} else {
995 		rctx->in_sg = req->src;
996 		rctx->total = req->nbytes;
997 		req->src = rctx->in_sg;
998 	}
999 
1000 	/* on next call, we only have the remaining data in the buffer */
1001 	rctx->buf_cnt = hash_later;
1002 
1003 	return -EINPROGRESS;
1004 }
1005 
1006 static int sahara_sha_process(struct ahash_request *req)
1007 {
1008 	struct sahara_dev *dev = dev_ptr;
1009 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1010 	int ret;
1011 	unsigned long timeout;
1012 
1013 	ret = sahara_sha_prepare_request(req);
1014 	if (!ret)
1015 		return ret;
1016 
1017 	if (rctx->first) {
1018 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1019 		dev->hw_desc[0]->next = 0;
1020 		rctx->first = 0;
1021 	} else {
1022 		memcpy(dev->context_base, rctx->context, rctx->context_size);
1023 
1024 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1025 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1026 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1027 		dev->hw_desc[1]->next = 0;
1028 	}
1029 
1030 	sahara_dump_descriptors(dev);
1031 	sahara_dump_links(dev);
1032 
1033 	reinit_completion(&dev->dma_completion);
1034 
1035 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1036 
1037 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1038 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1039 	if (!timeout) {
1040 		dev_err(dev->device, "SHA timeout\n");
1041 		return -ETIMEDOUT;
1042 	}
1043 
1044 	if (rctx->sg_in_idx)
1045 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1046 			     DMA_TO_DEVICE);
1047 
1048 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1049 
1050 	if (req->result && rctx->last)
1051 		memcpy(req->result, rctx->context, rctx->digest_size);
1052 
1053 	return 0;
1054 }
1055 
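/*
 * Worker thread: dequeue one request at a time (the queue length is 1),
 * run it through the AES or SHA path and complete it, then sleep until
 * new work is enqueued.
 */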
1056 static int sahara_queue_manage(void *data)
1057 {
1058 	struct sahara_dev *dev = data;
1059 	struct crypto_async_request *async_req;
1060 	struct crypto_async_request *backlog;
1061 	int ret = 0;
1062 
1063 	do {
1064 		__set_current_state(TASK_INTERRUPTIBLE);
1065 
1066 		spin_lock_bh(&dev->queue_spinlock);
1067 		backlog = crypto_get_backlog(&dev->queue);
1068 		async_req = crypto_dequeue_request(&dev->queue);
1069 		spin_unlock_bh(&dev->queue_spinlock);
1070 
1071 		if (backlog)
1072 			crypto_request_complete(backlog, -EINPROGRESS);
1073 
1074 		if (async_req) {
1075 			if (crypto_tfm_alg_type(async_req->tfm) ==
1076 			    CRYPTO_ALG_TYPE_AHASH) {
1077 				struct ahash_request *req =
1078 					ahash_request_cast(async_req);
1079 
1080 				ret = sahara_sha_process(req);
1081 			} else {
1082 				struct skcipher_request *req =
1083 					skcipher_request_cast(async_req);
1084 
1085 				ret = sahara_aes_process(req);
1086 			}
1087 
1088 			crypto_request_complete(async_req, ret);
1089 
1090 			continue;
1091 		}
1092 
1093 		schedule();
1094 	} while (!kthread_should_stop());
1095 
1096 	return 0;
1097 }
1098 
1099 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1100 {
1101 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1102 	struct sahara_dev *dev = dev_ptr;
1103 	int ret;
1104 
1105 	if (!req->nbytes && !last)
1106 		return 0;
1107 
1108 	rctx->last = last;
1109 
1110 	if (!rctx->active) {
1111 		rctx->active = 1;
1112 		rctx->first = 1;
1113 	}
1114 
1115 	spin_lock_bh(&dev->queue_spinlock);
1116 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1117 	spin_unlock_bh(&dev->queue_spinlock);
1118 
1119 	wake_up_process(dev->kthread);
1120 
1121 	return ret;
1122 }
1123 
1124 static int sahara_sha_init(struct ahash_request *req)
1125 {
1126 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1127 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1128 
1129 	memset(rctx, 0, sizeof(*rctx));
1130 
1131 	switch (crypto_ahash_digestsize(tfm)) {
1132 	case SHA1_DIGEST_SIZE:
1133 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1134 		rctx->digest_size = SHA1_DIGEST_SIZE;
1135 		break;
1136 	case SHA256_DIGEST_SIZE:
1137 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1138 		rctx->digest_size = SHA256_DIGEST_SIZE;
1139 		break;
1140 	default:
1141 		return -EINVAL;
1142 	}
1143 
1144 	rctx->context_size = rctx->digest_size + 4;
1145 	rctx->active = 0;
1146 
1147 	return 0;
1148 }
1149 
1150 static int sahara_sha_update(struct ahash_request *req)
1151 {
1152 	return sahara_sha_enqueue(req, 0);
1153 }
1154 
1155 static int sahara_sha_final(struct ahash_request *req)
1156 {
1157 	req->nbytes = 0;
1158 	return sahara_sha_enqueue(req, 1);
1159 }
1160 
1161 static int sahara_sha_finup(struct ahash_request *req)
1162 {
1163 	return sahara_sha_enqueue(req, 1);
1164 }
1165 
1166 static int sahara_sha_digest(struct ahash_request *req)
1167 {
1168 	sahara_sha_init(req);
1169 
1170 	return sahara_sha_finup(req);
1171 }
1172 
1173 static int sahara_sha_export(struct ahash_request *req, void *out)
1174 {
1175 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1176 
1177 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1178 
1179 	return 0;
1180 }
1181 
1182 static int sahara_sha_import(struct ahash_request *req, const void *in)
1183 {
1184 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1185 
1186 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1187 
1188 	return 0;
1189 }
1190 
1191 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1192 {
1193 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1194 				 sizeof(struct sahara_sha_reqctx) +
1195 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1196 
1197 	return 0;
1198 }
1199 
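/*
 * Only AES-128 is handled in hardware; 192- and 256-bit keys are served by
 * the software fallback allocated in sahara_aes_init_tfm(), hence
 * CRYPTO_ALG_NEED_FALLBACK below.
 */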
1200 static struct skcipher_alg aes_algs[] = {
1201 {
1202 	.base.cra_name		= "ecb(aes)",
1203 	.base.cra_driver_name	= "sahara-ecb-aes",
1204 	.base.cra_priority	= 300,
1205 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1206 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1207 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1208 	.base.cra_alignmask	= 0x0,
1209 	.base.cra_module	= THIS_MODULE,
1210 
1211 	.init			= sahara_aes_init_tfm,
1212 	.exit			= sahara_aes_exit_tfm,
1213 	.min_keysize		= AES_MIN_KEY_SIZE,
1214 	.max_keysize		= AES_MAX_KEY_SIZE,
1215 	.setkey			= sahara_aes_setkey,
1216 	.encrypt		= sahara_aes_ecb_encrypt,
1217 	.decrypt		= sahara_aes_ecb_decrypt,
1218 }, {
1219 	.base.cra_name		= "cbc(aes)",
1220 	.base.cra_driver_name	= "sahara-cbc-aes",
1221 	.base.cra_priority	= 300,
1222 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1223 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1224 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1225 	.base.cra_alignmask	= 0x0,
1226 	.base.cra_module	= THIS_MODULE,
1227 
1228 	.init			= sahara_aes_init_tfm,
1229 	.exit			= sahara_aes_exit_tfm,
1230 	.min_keysize		= AES_MIN_KEY_SIZE,
1231 	.max_keysize		= AES_MAX_KEY_SIZE,
1232 	.ivsize			= AES_BLOCK_SIZE,
1233 	.setkey			= sahara_aes_setkey,
1234 	.encrypt		= sahara_aes_cbc_encrypt,
1235 	.decrypt		= sahara_aes_cbc_decrypt,
1236 }
1237 };
1238 
1239 static struct ahash_alg sha_v3_algs[] = {
1240 {
1241 	.init		= sahara_sha_init,
1242 	.update		= sahara_sha_update,
1243 	.final		= sahara_sha_final,
1244 	.finup		= sahara_sha_finup,
1245 	.digest		= sahara_sha_digest,
1246 	.export		= sahara_sha_export,
1247 	.import		= sahara_sha_import,
1248 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1249 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1250 	.halg.base	= {
1251 		.cra_name		= "sha1",
1252 		.cra_driver_name	= "sahara-sha1",
1253 		.cra_priority		= 300,
1254 		.cra_flags		= CRYPTO_ALG_ASYNC |
1255 						CRYPTO_ALG_NEED_FALLBACK,
1256 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1257 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1258 		.cra_alignmask		= 0,
1259 		.cra_module		= THIS_MODULE,
1260 		.cra_init		= sahara_sha_cra_init,
1261 	}
1262 },
1263 };
1264 
1265 static struct ahash_alg sha_v4_algs[] = {
1266 {
1267 	.init		= sahara_sha_init,
1268 	.update		= sahara_sha_update,
1269 	.final		= sahara_sha_final,
1270 	.finup		= sahara_sha_finup,
1271 	.digest		= sahara_sha_digest,
1272 	.export		= sahara_sha_export,
1273 	.import		= sahara_sha_import,
1274 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1275 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1276 	.halg.base	= {
1277 		.cra_name		= "sha256",
1278 		.cra_driver_name	= "sahara-sha256",
1279 		.cra_priority		= 300,
1280 		.cra_flags		= CRYPTO_ALG_ASYNC |
1281 						CRYPTO_ALG_NEED_FALLBACK,
1282 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1283 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1284 		.cra_alignmask		= 0,
1285 		.cra_module		= THIS_MODULE,
1286 		.cra_init		= sahara_sha_cra_init,
1287 	}
1288 },
1289 };
1290 
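/*
 * Interrupt handler: acknowledge the interrupt and signal the waiting
 * thread. A BUSY state means the descriptor has not finished yet and the
 * interrupt is not ours; any state other than COMPLETE is reported as an
 * error through dev->error.
 */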
1291 static irqreturn_t sahara_irq_handler(int irq, void *data)
1292 {
1293 	struct sahara_dev *dev = data;
1294 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1295 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1296 
1297 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1298 		     SAHARA_REG_CMD);
1299 
1300 	sahara_decode_status(dev, stat);
1301 
1302 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1303 		return IRQ_NONE;
1304 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1305 		dev->error = 0;
1306 	} else {
1307 		sahara_decode_error(dev, err);
1308 		dev->error = -EINVAL;
1309 	}
1310 
1311 	complete(&dev->dma_completion);
1312 
1313 	return IRQ_HANDLED;
1314 }
1315 
1316 
1317 static int sahara_register_algs(struct sahara_dev *dev)
1318 {
1319 	int err;
1320 	unsigned int i, j, k, l;
1321 
1322 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1323 		err = crypto_register_skcipher(&aes_algs[i]);
1324 		if (err)
1325 			goto err_aes_algs;
1326 	}
1327 
1328 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1329 		err = crypto_register_ahash(&sha_v3_algs[k]);
1330 		if (err)
1331 			goto err_sha_v3_algs;
1332 	}
1333 
1334 	if (dev->version > SAHARA_VERSION_3)
1335 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1336 			err = crypto_register_ahash(&sha_v4_algs[l]);
1337 			if (err)
1338 				goto err_sha_v4_algs;
1339 		}
1340 
1341 	return 0;
1342 
1343 err_sha_v4_algs:
1344 	for (j = 0; j < l; j++)
1345 		crypto_unregister_ahash(&sha_v4_algs[j]);
1346 
1347 err_sha_v3_algs:
1348 	for (j = 0; j < k; j++)
1349 		crypto_unregister_ahash(&sha_v3_algs[j]);
1350 
1351 err_aes_algs:
1352 	for (j = 0; j < i; j++)
1353 		crypto_unregister_skcipher(&aes_algs[j]);
1354 
1355 	return err;
1356 }
1357 
1358 static void sahara_unregister_algs(struct sahara_dev *dev)
1359 {
1360 	unsigned int i;
1361 
1362 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1363 		crypto_unregister_skcipher(&aes_algs[i]);
1364 
1365 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1366 		crypto_unregister_ahash(&sha_v3_algs[i]);
1367 
1368 	if (dev->version > SAHARA_VERSION_3)
1369 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1370 			crypto_unregister_ahash(&sha_v4_algs[i]);
1371 }
1372 
1373 static const struct of_device_id sahara_dt_ids[] = {
1374 	{ .compatible = "fsl,imx53-sahara" },
1375 	{ .compatible = "fsl,imx27-sahara" },
1376 	{ /* sentinel */ }
1377 };
1378 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1379 
1380 static int sahara_probe(struct platform_device *pdev)
1381 {
1382 	struct sahara_dev *dev;
1383 	u32 version;
1384 	int irq;
1385 	int err;
1386 	int i;
1387 
1388 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1389 	if (!dev)
1390 		return -ENOMEM;
1391 
1392 	dev->device = &pdev->dev;
1393 	platform_set_drvdata(pdev, dev);
1394 
1395 	/* Get the base address */
1396 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1397 	if (IS_ERR(dev->regs_base))
1398 		return PTR_ERR(dev->regs_base);
1399 
1400 	/* Get the IRQ */
1401 	irq = platform_get_irq(pdev, 0);
1402 	if (irq < 0)
1403 		return irq;
1404 
1405 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1406 			       0, dev_name(&pdev->dev), dev);
1407 	if (err) {
1408 		dev_err(&pdev->dev, "failed to request irq\n");
1409 		return err;
1410 	}
1411 
1412 	/* clocks */
1413 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1414 	if (IS_ERR(dev->clk_ipg)) {
1415 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1416 		return PTR_ERR(dev->clk_ipg);
1417 	}
1418 
1419 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1420 	if (IS_ERR(dev->clk_ahb)) {
1421 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1422 		return PTR_ERR(dev->clk_ahb);
1423 	}
1424 
1425 	/* Allocate HW descriptors */
1426 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1427 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1428 			&dev->hw_phys_desc[0], GFP_KERNEL);
1429 	if (!dev->hw_desc[0]) {
1430 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1431 		return -ENOMEM;
1432 	}
1433 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1434 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1435 				sizeof(struct sahara_hw_desc);
1436 
1437 	/* Allocate space for iv and key */
1438 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1439 				&dev->key_phys_base, GFP_KERNEL);
1440 	if (!dev->key_base) {
1441 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1442 		return -ENOMEM;
1443 	}
1444 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1445 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1446 
1447 	/* Allocate space for context: largest digest + message length field */
1448 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1449 					SHA256_DIGEST_SIZE + 4,
1450 					&dev->context_phys_base, GFP_KERNEL);
1451 	if (!dev->context_base) {
1452 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1453 		return -ENOMEM;
1454 	}
1455 
1456 	/* Allocate space for HW links */
1457 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1458 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1459 			&dev->hw_phys_link[0], GFP_KERNEL);
1460 	if (!dev->hw_link[0]) {
1461 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1462 		return -ENOMEM;
1463 	}
1464 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1465 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1466 					sizeof(struct sahara_hw_link);
1467 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1468 	}
1469 
1470 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1471 
1472 	spin_lock_init(&dev->queue_spinlock);
1473 
1474 	dev_ptr = dev;
1475 
1476 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1477 	if (IS_ERR(dev->kthread))
1478 		return PTR_ERR(dev->kthread);
1480 
1481 	init_completion(&dev->dma_completion);
1482 
1483 	err = clk_prepare_enable(dev->clk_ipg);
1484 	if (err)
1485 		return err;
1486 	err = clk_prepare_enable(dev->clk_ahb);
1487 	if (err)
1488 		goto clk_ipg_disable;
1489 
1490 	version = sahara_read(dev, SAHARA_REG_VERSION);
1491 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1492 		if (version != SAHARA_VERSION_3)
1493 			err = -ENODEV;
1494 	} else if (of_device_is_compatible(pdev->dev.of_node,
1495 			"fsl,imx53-sahara")) {
1496 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1497 			err = -ENODEV;
1498 		version = (version >> 8) & 0xff;
1499 	}
1500 	if (err == -ENODEV) {
1501 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1502 				version);
1503 		goto err_algs;
1504 	}
1505 
1506 	dev->version = version;
1507 
1508 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1509 		     SAHARA_REG_CMD);
1510 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1511 			SAHARA_CONTROL_SET_MAXBURST(8) |
1512 			SAHARA_CONTROL_RNG_AUTORSD |
1513 			SAHARA_CONTROL_ENABLE_INT,
1514 			SAHARA_REG_CONTROL);
1515 
1516 	err = sahara_register_algs(dev);
1517 	if (err)
1518 		goto err_algs;
1519 
1520 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1521 
1522 	return 0;
1523 
1524 err_algs:
1525 	kthread_stop(dev->kthread);
1526 	dev_ptr = NULL;
1527 	clk_disable_unprepare(dev->clk_ahb);
1528 clk_ipg_disable:
1529 	clk_disable_unprepare(dev->clk_ipg);
1530 
1531 	return err;
1532 }
1533 
1534 static int sahara_remove(struct platform_device *pdev)
1535 {
1536 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1537 
1538 	kthread_stop(dev->kthread);
1539 
1540 	sahara_unregister_algs(dev);
1541 
1542 	clk_disable_unprepare(dev->clk_ipg);
1543 	clk_disable_unprepare(dev->clk_ahb);
1544 
1545 	dev_ptr = NULL;
1546 
1547 	return 0;
1548 }
1549 
1550 static struct platform_driver sahara_driver = {
1551 	.probe		= sahara_probe,
1552 	.remove		= sahara_remove,
1553 	.driver		= {
1554 		.name	= SAHARA_NAME,
1555 		.of_match_table = sahara_dt_ids,
1556 	},
1557 };
1558 
1559 module_platform_driver(sahara_driver);
1560 
1561 MODULE_LICENSE("GPL");
1562 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1563 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1564 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1565