xref: /openbmc/linux/drivers/crypto/sahara.c (revision 4167eb94)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN		PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME "sahara"
37 #define SAHARA_VERSION_3	3
38 #define SAHARA_VERSION_4	4
39 #define SAHARA_TIMEOUT_MS	1000
40 #define SAHARA_MAX_HW_DESC	2
41 #define SAHARA_MAX_HW_LINK	20
42 
43 #define FLAGS_MODE_MASK		0x000f
44 #define FLAGS_ENCRYPT		BIT(0)
45 #define FLAGS_CBC		BIT(1)
46 
47 #define SAHARA_HDR_BASE			0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES	0
49 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
50 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
51 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
52 #define SAHARA_HDR_FORM_DATA		(5 << 16)
53 #define SAHARA_HDR_FORM_KEY		(8 << 16)
54 #define SAHARA_HDR_LLO			(1 << 24)
55 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
56 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
57 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
61 #define SAHARA_HDR_MDHA_HASH		0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1	0
64 #define SAHARA_HDR_MDHA_ALG_MD5		1
65 #define SAHARA_HDR_MDHA_ALG_SHA256	2
66 #define SAHARA_HDR_MDHA_ALG_SHA224	3
67 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
68 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
69 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
70 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
71 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
72 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
73 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
74 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
75 
76 /* SAHARA can only process one request at a time */
77 #define SAHARA_QUEUE_LENGTH	1
78 
79 #define SAHARA_REG_VERSION	0x00
80 #define SAHARA_REG_DAR		0x04
81 #define SAHARA_REG_CONTROL	0x08
82 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
83 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
84 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
85 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
86 #define SAHARA_REG_CMD		0x0C
87 #define		SAHARA_CMD_RESET		(1 << 0)
88 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
89 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
90 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
91 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
92 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
93 #define	SAHARA_REG_STATUS	0x10
94 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
95 #define			SAHARA_STATE_IDLE	0
96 #define			SAHARA_STATE_BUSY	1
97 #define			SAHARA_STATE_ERR	2
98 #define			SAHARA_STATE_FAULT	3
99 #define			SAHARA_STATE_COMPLETE	4
100 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
101 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
102 #define		SAHARA_STATUS_ERROR		(1 << 4)
103 #define		SAHARA_STATUS_SECURE		(1 << 5)
104 #define		SAHARA_STATUS_FAIL		(1 << 6)
105 #define		SAHARA_STATUS_INIT		(1 << 7)
106 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
107 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
108 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
109 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
110 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
111 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
112 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
113 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
114 #define SAHARA_REG_ERRSTATUS	0x14
115 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
116 #define			SAHARA_ERRSOURCE_CHA	14
117 #define			SAHARA_ERRSOURCE_DMA	15
118 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
119 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
120 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
121 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
122 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
123 #define SAHARA_REG_FADDR	0x18
124 #define SAHARA_REG_CDAR		0x1C
125 #define SAHARA_REG_IDAR		0x20
126 
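/*
 * In-memory layout of one SAHARA descriptor: a header word, two
 * length/pointer pairs and the DMA address of the next descriptor in the
 * chain (0 terminates the chain). Descriptors are read by the accelerator,
 * so they are kept in DMA-coherent memory (see sahara_probe()).
 */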
127 struct sahara_hw_desc {
128 	u32	hdr;
129 	u32	len1;
130 	u32	p1;
131 	u32	len2;
132 	u32	p2;
133 	u32	next;
134 };
135 
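/*
 * Scatter/gather element referenced by a descriptor's p1/p2 pointer:
 * byte count, DMA address of the data and DMA address of the next link
 * (0 ends the list).
 */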
136 struct sahara_hw_link {
137 	u32	len;
138 	u32	p;
139 	u32	next;
140 };
141 
142 struct sahara_ctx {
143 	/* AES-specific context */
144 	int keylen;
145 	u8 key[AES_KEYSIZE_128];
146 	struct crypto_skcipher *fallback;
147 };
148 
149 struct sahara_aes_reqctx {
150 	unsigned long mode;
151 	u8 iv_out[AES_BLOCK_SIZE];
152 	struct skcipher_request fallback_req;	// keep at the end
153 };
154 
155 /*
156  * struct sahara_sha_reqctx - private data per request
157  * @buf: holds data for requests smaller than block_size
158  * @rembuf: used to prepare one block_size-aligned request
159  * @context: hw-specific context for request. Digest is extracted from this
160  * @mode: specifies what type of hw-descriptor needs to be built
161  * @digest_size: length of digest for this request
162  * @context_size: length of hw-context for this request.
163  *                Always digest_size + 4
164  * @buf_cnt: number of bytes saved in buf
165  * @sg_in_idx: number of hw links
166  * @in_sg: scatterlist for input data
167  * @in_sg_chain: scatterlists for chained input data
168  * @total: total number of bytes for transfer
169  * @last: is this the last block
170  * @first: is this the first block
171  * @active: inside a transfer
172  */
173 struct sahara_sha_reqctx {
174 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
175 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			context[SHA256_DIGEST_SIZE + 4];
177 	unsigned int		mode;
178 	unsigned int		digest_size;
179 	unsigned int		context_size;
180 	unsigned int		buf_cnt;
181 	unsigned int		sg_in_idx;
182 	struct scatterlist	*in_sg;
183 	struct scatterlist	in_sg_chain[2];
184 	size_t			total;
185 	unsigned int		last;
186 	unsigned int		first;
187 	unsigned int		active;
188 };
189 
190 struct sahara_dev {
191 	struct device		*device;
192 	unsigned int		version;
193 	void __iomem		*regs_base;
194 	struct clk		*clk_ipg;
195 	struct clk		*clk_ahb;
196 	spinlock_t		queue_spinlock;
197 	struct task_struct	*kthread;
198 	struct completion	dma_completion;
199 
200 	struct sahara_ctx	*ctx;
201 	struct crypto_queue	queue;
202 	unsigned long		flags;
203 
204 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
205 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
206 
207 	u8			*key_base;
208 	dma_addr_t		key_phys_base;
209 
210 	u8			*iv_base;
211 	dma_addr_t		iv_phys_base;
212 
213 	u8			*context_base;
214 	dma_addr_t		context_phys_base;
215 
216 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
217 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
218 
219 	size_t			total;
220 	struct scatterlist	*in_sg;
221 	int		nb_in_sg;
222 	struct scatterlist	*out_sg;
223 	int		nb_out_sg;
224 
225 	u32			error;
226 };
227 
228 static struct sahara_dev *dev_ptr;
229 
230 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
231 {
232 	writel(data, dev->regs_base + reg);
233 }
234 
235 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
236 {
237 	return readl(dev->regs_base + reg);
238 }
239 
240 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
241 {
242 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
243 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
244 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
245 
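	/*
	 * Every mode bit OR-ed in below flips bit 31 again, so the final
	 * header always keeps an odd number of set bits (bit 31 is the
	 * parity bit; see also sahara_sha_init_hdr()).
	 */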
246 	if (dev->flags & FLAGS_CBC) {
247 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
248 		hdr ^= SAHARA_HDR_PARITY_BIT;
249 	}
250 
251 	if (dev->flags & FLAGS_ENCRYPT) {
252 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
253 		hdr ^= SAHARA_HDR_PARITY_BIT;
254 	}
255 
256 	return hdr;
257 }
258 
259 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
260 {
261 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
262 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
263 }
264 
265 static const char *sahara_err_src[16] = {
266 	"No error",
267 	"Header error",
268 	"Descriptor length error",
269 	"Descriptor length or pointer error",
270 	"Link length error",
271 	"Link pointer error",
272 	"Input buffer error",
273 	"Output buffer error",
274 	"Output buffer starvation",
275 	"Internal state fault",
276 	"General descriptor problem",
277 	"Reserved",
278 	"Descriptor address error",
279 	"Link address error",
280 	"CHA error",
281 	"DMA error"
282 };
283 
284 static const char *sahara_err_dmasize[4] = {
285 	"Byte transfer",
286 	"Half-word transfer",
287 	"Word transfer",
288 	"Reserved"
289 };
290 
291 static const char *sahara_err_dmasrc[8] = {
292 	"No error",
293 	"AHB bus error",
294 	"Internal IP bus error",
295 	"Parity error",
296 	"DMA crosses 256 byte boundary",
297 	"DMA is busy",
298 	"Reserved",
299 	"DMA HW error"
300 };
301 
302 static const char *sahara_cha_errsrc[12] = {
303 	"Input buffer non-empty",
304 	"Illegal address",
305 	"Illegal mode",
306 	"Illegal data size",
307 	"Illegal key size",
308 	"Write during processing",
309 	"CTX read during processing",
310 	"HW error",
311 	"Input buffer disabled/underflow",
312 	"Output buffer disabled/overflow",
313 	"DES key parity error",
314 	"Reserved"
315 };
316 
317 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
318 
319 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
320 {
321 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
322 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
323 
324 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
325 
326 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
327 
328 	if (source == SAHARA_ERRSOURCE_DMA) {
329 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
330 			dev_err(dev->device, "		* DMA read.\n");
331 		else
332 			dev_err(dev->device, "		* DMA write.\n");
333 
334 		dev_err(dev->device, "		* %s.\n",
335 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
336 		dev_err(dev->device, "		* %s.\n",
337 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
338 	} else if (source == SAHARA_ERRSOURCE_CHA) {
339 		dev_err(dev->device, "		* %s.\n",
340 			sahara_cha_errsrc[chasrc]);
341 		dev_err(dev->device, "		* %s.\n",
342 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
343 	}
344 	dev_err(dev->device, "\n");
345 }
346 
347 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
348 
349 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
350 {
351 	u8 state;
352 
353 	if (!__is_defined(DEBUG))
354 		return;
355 
356 	state = SAHARA_STATUS_GET_STATE(status);
357 
358 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
359 		__func__, status);
360 
361 	dev_dbg(dev->device, "	- State = %d:\n", state);
362 	if (state & SAHARA_STATE_COMP_FLAG)
363 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
364 
365 	dev_dbg(dev->device, "		* %s.\n",
366 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
367 
368 	if (status & SAHARA_STATUS_DAR_FULL)
369 		dev_dbg(dev->device, "	- DAR Full.\n");
370 	if (status & SAHARA_STATUS_ERROR)
371 		dev_dbg(dev->device, "	- Error.\n");
372 	if (status & SAHARA_STATUS_SECURE)
373 		dev_dbg(dev->device, "	- Secure.\n");
374 	if (status & SAHARA_STATUS_FAIL)
375 		dev_dbg(dev->device, "	- Fail.\n");
376 	if (status & SAHARA_STATUS_RNG_RESEED)
377 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
378 	if (status & SAHARA_STATUS_ACTIVE_RNG)
379 		dev_dbg(dev->device, "	- RNG Active.\n");
380 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
381 		dev_dbg(dev->device, "	- MDHA Active.\n");
382 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
383 		dev_dbg(dev->device, "	- SKHA Active.\n");
384 
385 	if (status & SAHARA_STATUS_MODE_BATCH)
386 		dev_dbg(dev->device, "	- Batch Mode.\n");
387 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
388 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
389 	else if (status & SAHARA_STATUS_MODE_DEBUG)
390 		dev_dbg(dev->device, "	- Debug Mode.\n");
391 
392 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
393 	       SAHARA_STATUS_GET_ISTATE(status));
394 
395 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
396 		sahara_read(dev, SAHARA_REG_CDAR));
397 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
398 		sahara_read(dev, SAHARA_REG_IDAR));
399 }
400 
401 static void sahara_dump_descriptors(struct sahara_dev *dev)
402 {
403 	int i;
404 
405 	if (!__is_defined(DEBUG))
406 		return;
407 
408 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
409 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
410 			i, &dev->hw_phys_desc[i]);
411 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
412 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
413 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
414 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
415 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
416 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
417 			dev->hw_desc[i]->next);
418 	}
419 	dev_dbg(dev->device, "\n");
420 }
421 
422 static void sahara_dump_links(struct sahara_dev *dev)
423 {
424 	int i;
425 
426 	if (!__is_defined(DEBUG))
427 		return;
428 
429 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
430 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
431 			i, &dev->hw_phys_link[i]);
432 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
433 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
434 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
435 			dev->hw_link[i]->next);
436 	}
437 	dev_dbg(dev->device, "\n");
438 }
439 
440 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
441 {
442 	struct sahara_ctx *ctx = dev->ctx;
443 	struct scatterlist *sg;
444 	int ret;
445 	int i, j;
446 	int idx = 0;
447 	u32 len;
448 
449 	memcpy(dev->key_base, ctx->key, ctx->keylen);
450 
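	/*
	 * Descriptor 0 loads the key (and the IV in CBC mode) into the SKHA
	 * unit; descriptor 1, filled in further down, carries the links that
	 * describe the actual input and output data.
	 */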
451 	if (dev->flags & FLAGS_CBC) {
452 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
453 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
454 	} else {
455 		dev->hw_desc[idx]->len1 = 0;
456 		dev->hw_desc[idx]->p1 = 0;
457 	}
458 	dev->hw_desc[idx]->len2 = ctx->keylen;
459 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
460 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
461 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
462 
463 	idx++;
464 
465 
466 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
467 	if (dev->nb_in_sg < 0) {
468 		dev_err(dev->device, "Invalid number of src SG.\n");
469 		return dev->nb_in_sg;
470 	}
471 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
472 	if (dev->nb_out_sg < 0) {
473 		dev_err(dev->device, "Invalid number of dst SG.\n");
474 		return dev->nb_out_sg;
475 	}
476 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
477 		dev_err(dev->device, "not enough hw links (%d)\n",
478 			dev->nb_in_sg + dev->nb_out_sg);
479 		return -EINVAL;
480 	}
481 
482 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
483 			 DMA_TO_DEVICE);
484 	if (!ret) {
485 		dev_err(dev->device, "couldn't map in sg\n");
486 		goto unmap_in;
487 	}
488 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
489 			 DMA_FROM_DEVICE);
490 	if (!ret) {
491 		dev_err(dev->device, "couldn't map out sg\n");
492 		goto unmap_out;
493 	}
494 
495 	/* Create input links */
496 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
497 	sg = dev->in_sg;
498 	len = dev->total;
499 	for (i = 0; i < dev->nb_in_sg; i++) {
500 		dev->hw_link[i]->len = min(len, sg->length);
501 		dev->hw_link[i]->p = sg->dma_address;
502 		if (i == (dev->nb_in_sg - 1)) {
503 			dev->hw_link[i]->next = 0;
504 		} else {
505 			len -= min(len, sg->length);
506 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
507 			sg = sg_next(sg);
508 		}
509 	}
510 
511 	/* Create output links */
512 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
513 	sg = dev->out_sg;
514 	len = dev->total;
515 	for (j = i; j < dev->nb_out_sg + i; j++) {
516 		dev->hw_link[j]->len = min(len, sg->length);
517 		dev->hw_link[j]->p = sg->dma_address;
518 		if (j == (dev->nb_out_sg + i - 1)) {
519 			dev->hw_link[j]->next = 0;
520 		} else {
521 			len -= min(len, sg->length);
522 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
523 			sg = sg_next(sg);
524 		}
525 	}
526 
527 	/* Fill remaining fields of hw_desc[1] */
528 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
529 	dev->hw_desc[idx]->len1 = dev->total;
530 	dev->hw_desc[idx]->len2 = dev->total;
531 	dev->hw_desc[idx]->next = 0;
532 
533 	sahara_dump_descriptors(dev);
534 	sahara_dump_links(dev);
535 
536 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
537 
538 	return 0;
539 
540 unmap_out:
541 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
542 		DMA_FROM_DEVICE);
543 unmap_in:
544 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
545 		DMA_TO_DEVICE);
546 
547 	return -EINVAL;
548 }
549 
550 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
551 {
552 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
553 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
554 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
555 
556 	/* Update IV buffer to contain the last ciphertext block */
557 	if (rctx->mode & FLAGS_ENCRYPT) {
558 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
559 				   ivsize, req->cryptlen - ivsize);
560 	} else {
561 		memcpy(req->iv, rctx->iv_out, ivsize);
562 	}
563 }
564 
565 static int sahara_aes_process(struct skcipher_request *req)
566 {
567 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
568 	struct sahara_dev *dev = dev_ptr;
569 	struct sahara_ctx *ctx;
570 	struct sahara_aes_reqctx *rctx;
571 	int ret;
572 	unsigned long timeout;
573 
574 	/* Request is ready to be dispatched by the device */
575 	dev_dbg(dev->device,
576 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
577 		req->cryptlen, req->src, req->dst);
578 
579 	/* assign new request to device */
580 	dev->total = req->cryptlen;
581 	dev->in_sg = req->src;
582 	dev->out_sg = req->dst;
583 
584 	rctx = skcipher_request_ctx(req);
585 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
586 	rctx->mode &= FLAGS_MODE_MASK;
587 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
588 
589 	if ((dev->flags & FLAGS_CBC) && req->iv) {
590 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
591 
592 		memcpy(dev->iv_base, req->iv, ivsize);
593 
594 		if (!(dev->flags & FLAGS_ENCRYPT)) {
595 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
596 					   rctx->iv_out, ivsize,
597 					   req->cryptlen - ivsize);
598 		}
599 	}
600 
601 	/* assign new context to device */
602 	dev->ctx = ctx;
603 
604 	reinit_completion(&dev->dma_completion);
605 
606 	ret = sahara_hw_descriptor_create(dev);
607 	if (ret)
608 		return -EINVAL;
609 
610 	timeout = wait_for_completion_timeout(&dev->dma_completion,
611 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
612 	if (!timeout) {
613 		dev_err(dev->device, "AES timeout\n");
614 		return -ETIMEDOUT;
615 	}
616 
617 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
618 		DMA_FROM_DEVICE);
619 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
620 		DMA_TO_DEVICE);
621 
622 	if ((dev->flags & FLAGS_CBC) && req->iv)
623 		sahara_aes_cbc_update_iv(req);
624 
625 	return 0;
626 }
627 
628 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
629 			     unsigned int keylen)
630 {
631 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
632 
633 	ctx->keylen = keylen;
634 
635 	/* SAHARA only supports 128-bit keys */
636 	if (keylen == AES_KEYSIZE_128) {
637 		memcpy(ctx->key, key, keylen);
638 		return 0;
639 	}
640 
641 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
642 		return -EINVAL;
643 
644 	/*
645 	 * The requested key size is not supported by HW, fall back to software.
646 	 */
647 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
648 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
649 						 CRYPTO_TFM_REQ_MASK);
650 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
651 }
652 
653 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
654 {
655 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
656 	struct sahara_dev *dev = dev_ptr;
657 	int err = 0;
658 
659 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
660 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
661 
662 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
663 		dev_err(dev->device,
664 			"request size is not exact amount of AES blocks\n");
665 		return -EINVAL;
666 	}
667 
668 	rctx->mode = mode;
669 
670 	spin_lock_bh(&dev->queue_spinlock);
671 	err = crypto_enqueue_request(&dev->queue, &req->base);
672 	spin_unlock_bh(&dev->queue_spinlock);
673 
674 	wake_up_process(dev->kthread);
675 
676 	return err;
677 }
678 
679 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
680 {
681 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
682 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
683 		crypto_skcipher_reqtfm(req));
684 
685 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
686 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
687 		skcipher_request_set_callback(&rctx->fallback_req,
688 					      req->base.flags,
689 					      req->base.complete,
690 					      req->base.data);
691 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
692 					   req->dst, req->cryptlen, req->iv);
693 		return crypto_skcipher_encrypt(&rctx->fallback_req);
694 	}
695 
696 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
697 }
698 
699 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
700 {
701 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
702 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
703 		crypto_skcipher_reqtfm(req));
704 
705 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
706 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
707 		skcipher_request_set_callback(&rctx->fallback_req,
708 					      req->base.flags,
709 					      req->base.complete,
710 					      req->base.data);
711 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
712 					   req->dst, req->cryptlen, req->iv);
713 		return crypto_skcipher_decrypt(&rctx->fallback_req);
714 	}
715 
716 	return sahara_aes_crypt(req, 0);
717 }
718 
719 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
720 {
721 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
722 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
723 		crypto_skcipher_reqtfm(req));
724 
725 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
726 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
727 		skcipher_request_set_callback(&rctx->fallback_req,
728 					      req->base.flags,
729 					      req->base.complete,
730 					      req->base.data);
731 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
732 					   req->dst, req->cryptlen, req->iv);
733 		return crypto_skcipher_encrypt(&rctx->fallback_req);
734 	}
735 
736 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
737 }
738 
739 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
740 {
741 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
742 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
743 		crypto_skcipher_reqtfm(req));
744 
745 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
746 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
747 		skcipher_request_set_callback(&rctx->fallback_req,
748 					      req->base.flags,
749 					      req->base.complete,
750 					      req->base.data);
751 		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
752 					   req->dst, req->cryptlen, req->iv);
753 		return crypto_skcipher_decrypt(&rctx->fallback_req);
754 	}
755 
756 	return sahara_aes_crypt(req, FLAGS_CBC);
757 }
758 
759 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
760 {
761 	const char *name = crypto_tfm_alg_name(&tfm->base);
762 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
763 
764 	ctx->fallback = crypto_alloc_skcipher(name, 0,
765 					      CRYPTO_ALG_NEED_FALLBACK);
766 	if (IS_ERR(ctx->fallback)) {
767 		pr_err("Error allocating fallback algo %s\n", name);
768 		return PTR_ERR(ctx->fallback);
769 	}
770 
771 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
772 					 crypto_skcipher_reqsize(ctx->fallback));
773 
774 	return 0;
775 }
776 
777 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
778 {
779 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
780 
781 	crypto_free_skcipher(ctx->fallback);
782 }
783 
784 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
785 			      struct sahara_sha_reqctx *rctx)
786 {
787 	u32 hdr = 0;
788 
789 	hdr = rctx->mode;
790 
791 	if (rctx->first) {
792 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
793 		hdr |= SAHARA_HDR_MDHA_INIT;
794 	} else {
795 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
796 	}
797 
798 	if (rctx->last)
799 		hdr |= SAHARA_HDR_MDHA_PDATA;
800 
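	/* keep the overall header parity odd; bit 31 is the parity bit */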
801 	if (hweight_long(hdr) % 2 == 0)
802 		hdr |= SAHARA_HDR_PARITY_BIT;
803 
804 	return hdr;
805 }
806 
807 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
808 				       struct sahara_sha_reqctx *rctx,
809 				       int start)
810 {
811 	struct scatterlist *sg;
812 	unsigned int i;
813 	int ret;
814 
815 	dev->in_sg = rctx->in_sg;
816 
817 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
818 	if (dev->nb_in_sg < 0) {
819 		dev_err(dev->device, "Invalid number of src SG.\n");
820 		return dev->nb_in_sg;
821 	}
822 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
823 		dev_err(dev->device, "not enough hw links (%d)\n",
824 			dev->nb_in_sg);
825 		return -EINVAL;
826 	}
827 
828 	sg = dev->in_sg;
829 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
830 	if (!ret)
831 		return -EFAULT;
832 
833 	for (i = start; i < dev->nb_in_sg + start; i++) {
834 		dev->hw_link[i]->len = sg->length;
835 		dev->hw_link[i]->p = sg->dma_address;
836 		if (i == (dev->nb_in_sg + start - 1)) {
837 			dev->hw_link[i]->next = 0;
838 		} else {
839 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
840 			sg = sg_next(sg);
841 		}
842 	}
843 
844 	return i;
845 }
846 
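/*
 * Data descriptor aka #8 (first chunk) or #10 (subsequent chunks)
 *
 * p1: input data links (may be empty for a zero-length final update)
 * p2: link used to store the updated MDHA context (digest + length field)
 */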
847 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
848 						struct sahara_sha_reqctx *rctx,
849 						struct ahash_request *req,
850 						int index)
851 {
852 	unsigned int result_len;
853 	int i = index;
854 
855 	if (rctx->first)
856 		/* Create initial descriptor: #8 */
857 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
858 	else
859 		/* Create hash descriptor: #10. Must follow #6. */
860 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
861 
862 	dev->hw_desc[index]->len1 = rctx->total;
863 	if (dev->hw_desc[index]->len1 == 0) {
864 		/* if len1 is 0, p1 must be 0, too */
865 		dev->hw_desc[index]->p1 = 0;
866 		rctx->sg_in_idx = 0;
867 	} else {
868 		/* Create input links */
869 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
870 		i = sahara_sha_hw_links_create(dev, rctx, index);
871 
872 		rctx->sg_in_idx = index;
873 		if (i < 0)
874 			return i;
875 	}
876 
877 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
878 
879 	/* Save the context for the next operation */
880 	result_len = rctx->context_size;
881 	dev->hw_link[i]->p = dev->context_phys_base;
882 
883 	dev->hw_link[i]->len = result_len;
884 	dev->hw_desc[index]->len2 = result_len;
885 
886 	dev->hw_link[i]->next = 0;
887 
888 	return 0;
889 }
890 
891 /*
892  * Load descriptor aka #6
893  *
894  * To load a previously saved context back to the MDHA unit
895  *
896  * p1: Saved Context
897  * p2: NULL
898  *
899  */
900 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
901 						struct sahara_sha_reqctx *rctx,
902 						struct ahash_request *req,
903 						int index)
904 {
905 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
906 
907 	dev->hw_desc[index]->len1 = rctx->context_size;
908 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
909 	dev->hw_desc[index]->len2 = 0;
910 	dev->hw_desc[index]->p2 = 0;
911 
912 	dev->hw_link[index]->len = rctx->context_size;
913 	dev->hw_link[index]->p = dev->context_phys_base;
914 	dev->hw_link[index]->next = 0;
915 
916 	return 0;
917 }
918 
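/*
 * Shorten the scatterlist so that it describes exactly nbytes of data,
 * marking the entry holding the final byte as the end of the list.
 */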
919 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
920 {
921 	if (!sg || !sg->length)
922 		return nbytes;
923 
924 	while (nbytes && sg) {
925 		if (nbytes <= sg->length) {
926 			sg->length = nbytes;
927 			sg_mark_end(sg);
928 			break;
929 		}
930 		nbytes -= sg->length;
931 		sg = sg_next(sg);
932 	}
933 
934 	return nbytes;
935 }
936 
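/*
 * Gather the data for the next hash operation.
 *
 * Returns 0 when the data was only buffered for a later call and nothing
 * needs to be sent to the hardware, or -EINPROGRESS when a block-aligned
 * chunk (described by rctx->in_sg and rctx->total) is ready for processing.
 */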
937 static int sahara_sha_prepare_request(struct ahash_request *req)
938 {
939 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
940 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
941 	unsigned int hash_later;
942 	unsigned int block_size;
943 	unsigned int len;
944 
945 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
946 
947 	/* append bytes from previous operation */
948 	len = rctx->buf_cnt + req->nbytes;
949 
950 	/* only the last transfer can be padded in hardware */
951 	if (!rctx->last && (len < block_size)) {
952 		/* too little data, save it for the next operation */
953 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
954 					 0, req->nbytes, 0);
955 		rctx->buf_cnt += req->nbytes;
956 
957 		return 0;
958 	}
959 
960 	/* add data from previous operation first */
961 	if (rctx->buf_cnt)
962 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
963 
964 	/* data must always be a multiple of block_size */
965 	hash_later = rctx->last ? 0 : len & (block_size - 1);
966 	if (hash_later) {
967 		unsigned int offset = req->nbytes - hash_later;
968 		/* Save remaining bytes for later use */
969 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
970 					hash_later, 0);
971 	}
972 
973 	/* nbytes should now be a multiple of block_size */
974 	req->nbytes = req->nbytes - hash_later;
975 
976 	sahara_walk_and_recalc(req->src, req->nbytes);
977 
978 	/* have data from previous operation and current */
979 	if (rctx->buf_cnt && req->nbytes) {
980 		sg_init_table(rctx->in_sg_chain, 2);
981 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
982 
983 		sg_chain(rctx->in_sg_chain, 2, req->src);
984 
985 		rctx->total = req->nbytes + rctx->buf_cnt;
986 		rctx->in_sg = rctx->in_sg_chain;
987 
988 		req->src = rctx->in_sg_chain;
989 	/* only data from previous operation */
990 	} else if (rctx->buf_cnt) {
991 		if (req->src)
992 			rctx->in_sg = req->src;
993 		else
994 			rctx->in_sg = rctx->in_sg_chain;
995 		/* buf was copied into rembuf above */
996 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
997 		rctx->total = rctx->buf_cnt;
998 	/* no data from previous operation */
999 	} else {
1000 		rctx->in_sg = req->src;
1001 		rctx->total = req->nbytes;
1002 		req->src = rctx->in_sg;
1003 	}
1004 
1005 	/* on next call, we only have the remaining data in the buffer */
1006 	rctx->buf_cnt = hash_later;
1007 
1008 	return -EINPROGRESS;
1009 }
1010 
1011 static int sahara_sha_process(struct ahash_request *req)
1012 {
1013 	struct sahara_dev *dev = dev_ptr;
1014 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1015 	int ret;
1016 	unsigned long timeout;
1017 
1018 	ret = sahara_sha_prepare_request(req);
1019 	if (!ret)
1020 		return ret;
1021 
1022 	if (rctx->first) {
1023 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1024 		dev->hw_desc[0]->next = 0;
1025 		rctx->first = 0;
1026 	} else {
1027 		memcpy(dev->context_base, rctx->context, rctx->context_size);
1028 
1029 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1030 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1031 		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1032 		dev->hw_desc[1]->next = 0;
1033 	}
1034 
1035 	sahara_dump_descriptors(dev);
1036 	sahara_dump_links(dev);
1037 
1038 	reinit_completion(&dev->dma_completion);
1039 
1040 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1041 
1042 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1043 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1044 	if (!timeout) {
1045 		dev_err(dev->device, "SHA timeout\n");
1046 		return -ETIMEDOUT;
1047 	}
1048 
1049 	if (rctx->sg_in_idx)
1050 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1051 			     DMA_TO_DEVICE);
1052 
1053 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1054 
1055 	if (req->result && rctx->last)
1056 		memcpy(req->result, rctx->context, rctx->digest_size);
1057 
1058 	return 0;
1059 }
1060 
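/*
 * Worker thread: dequeues one request at a time and runs it synchronously
 * on the SAHARA unit, sleeping whenever the queue is empty.
 */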
1061 static int sahara_queue_manage(void *data)
1062 {
1063 	struct sahara_dev *dev = data;
1064 	struct crypto_async_request *async_req;
1065 	struct crypto_async_request *backlog;
1066 	int ret = 0;
1067 
1068 	do {
1069 		__set_current_state(TASK_INTERRUPTIBLE);
1070 
1071 		spin_lock_bh(&dev->queue_spinlock);
1072 		backlog = crypto_get_backlog(&dev->queue);
1073 		async_req = crypto_dequeue_request(&dev->queue);
1074 		spin_unlock_bh(&dev->queue_spinlock);
1075 
1076 		if (backlog)
1077 			crypto_request_complete(backlog, -EINPROGRESS);
1078 
1079 		if (async_req) {
1080 			if (crypto_tfm_alg_type(async_req->tfm) ==
1081 			    CRYPTO_ALG_TYPE_AHASH) {
1082 				struct ahash_request *req =
1083 					ahash_request_cast(async_req);
1084 
1085 				ret = sahara_sha_process(req);
1086 			} else {
1087 				struct skcipher_request *req =
1088 					skcipher_request_cast(async_req);
1089 
1090 				ret = sahara_aes_process(req);
1091 			}
1092 
1093 			crypto_request_complete(async_req, ret);
1094 
1095 			continue;
1096 		}
1097 
1098 		schedule();
1099 	} while (!kthread_should_stop());
1100 
1101 	return 0;
1102 }
1103 
1104 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1105 {
1106 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1107 	struct sahara_dev *dev = dev_ptr;
1108 	int ret;
1109 
1110 	if (!req->nbytes && !last)
1111 		return 0;
1112 
1113 	rctx->last = last;
1114 
1115 	if (!rctx->active) {
1116 		rctx->active = 1;
1117 		rctx->first = 1;
1118 	}
1119 
1120 	spin_lock_bh(&dev->queue_spinlock);
1121 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1122 	spin_unlock_bh(&dev->queue_spinlock);
1123 
1124 	wake_up_process(dev->kthread);
1125 
1126 	return ret;
1127 }
1128 
1129 static int sahara_sha_init(struct ahash_request *req)
1130 {
1131 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1132 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1133 
1134 	memset(rctx, 0, sizeof(*rctx));
1135 
1136 	switch (crypto_ahash_digestsize(tfm)) {
1137 	case SHA1_DIGEST_SIZE:
1138 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1139 		rctx->digest_size = SHA1_DIGEST_SIZE;
1140 		break;
1141 	case SHA256_DIGEST_SIZE:
1142 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1143 		rctx->digest_size = SHA256_DIGEST_SIZE;
1144 		break;
1145 	default:
1146 		return -EINVAL;
1147 	}
1148 
1149 	rctx->context_size = rctx->digest_size + 4;
1150 	rctx->active = 0;
1151 
1152 	return 0;
1153 }
1154 
1155 static int sahara_sha_update(struct ahash_request *req)
1156 {
1157 	return sahara_sha_enqueue(req, 0);
1158 }
1159 
1160 static int sahara_sha_final(struct ahash_request *req)
1161 {
1162 	req->nbytes = 0;
1163 	return sahara_sha_enqueue(req, 1);
1164 }
1165 
1166 static int sahara_sha_finup(struct ahash_request *req)
1167 {
1168 	return sahara_sha_enqueue(req, 1);
1169 }
1170 
1171 static int sahara_sha_digest(struct ahash_request *req)
1172 {
1173 	sahara_sha_init(req);
1174 
1175 	return sahara_sha_finup(req);
1176 }
1177 
1178 static int sahara_sha_export(struct ahash_request *req, void *out)
1179 {
1180 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1181 
1182 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1183 
1184 	return 0;
1185 }
1186 
1187 static int sahara_sha_import(struct ahash_request *req, const void *in)
1188 {
1189 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1190 
1191 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1192 
1193 	return 0;
1194 }
1195 
1196 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1197 {
1198 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1199 				 sizeof(struct sahara_sha_reqctx) +
1200 				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1201 
1202 	return 0;
1203 }
1204 
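/*
 * skcipher algorithms exposed through the crypto API. For illustration only
 * (not part of this driver): a kernel user reaches them via the generic
 * interface, e.g.
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * and the crypto core selects "sahara-cbc-aes" whenever its priority (300)
 * wins over other registered cbc(aes) implementations.
 */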
1205 static struct skcipher_alg aes_algs[] = {
1206 {
1207 	.base.cra_name		= "ecb(aes)",
1208 	.base.cra_driver_name	= "sahara-ecb-aes",
1209 	.base.cra_priority	= 300,
1210 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1211 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1212 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1213 	.base.cra_alignmask	= 0x0,
1214 	.base.cra_module	= THIS_MODULE,
1215 
1216 	.init			= sahara_aes_init_tfm,
1217 	.exit			= sahara_aes_exit_tfm,
1218 	.min_keysize		= AES_MIN_KEY_SIZE,
1219 	.max_keysize		= AES_MAX_KEY_SIZE,
1220 	.setkey			= sahara_aes_setkey,
1221 	.encrypt		= sahara_aes_ecb_encrypt,
1222 	.decrypt		= sahara_aes_ecb_decrypt,
1223 }, {
1224 	.base.cra_name		= "cbc(aes)",
1225 	.base.cra_driver_name	= "sahara-cbc-aes",
1226 	.base.cra_priority	= 300,
1227 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1228 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1229 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1230 	.base.cra_alignmask	= 0x0,
1231 	.base.cra_module	= THIS_MODULE,
1232 
1233 	.init			= sahara_aes_init_tfm,
1234 	.exit			= sahara_aes_exit_tfm,
1235 	.min_keysize		= AES_MIN_KEY_SIZE,
1236 	.max_keysize		= AES_MAX_KEY_SIZE,
1237 	.ivsize			= AES_BLOCK_SIZE,
1238 	.setkey			= sahara_aes_setkey,
1239 	.encrypt		= sahara_aes_cbc_encrypt,
1240 	.decrypt		= sahara_aes_cbc_decrypt,
1241 }
1242 };
1243 
1244 static struct ahash_alg sha_v3_algs[] = {
1245 {
1246 	.init		= sahara_sha_init,
1247 	.update		= sahara_sha_update,
1248 	.final		= sahara_sha_final,
1249 	.finup		= sahara_sha_finup,
1250 	.digest		= sahara_sha_digest,
1251 	.export		= sahara_sha_export,
1252 	.import		= sahara_sha_import,
1253 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1254 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1255 	.halg.base	= {
1256 		.cra_name		= "sha1",
1257 		.cra_driver_name	= "sahara-sha1",
1258 		.cra_priority		= 300,
1259 		.cra_flags		= CRYPTO_ALG_ASYNC |
1260 						CRYPTO_ALG_NEED_FALLBACK,
1261 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1262 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1263 		.cra_alignmask		= 0,
1264 		.cra_module		= THIS_MODULE,
1265 		.cra_init		= sahara_sha_cra_init,
1266 	}
1267 },
1268 };
1269 
1270 static struct ahash_alg sha_v4_algs[] = {
1271 {
1272 	.init		= sahara_sha_init,
1273 	.update		= sahara_sha_update,
1274 	.final		= sahara_sha_final,
1275 	.finup		= sahara_sha_finup,
1276 	.digest		= sahara_sha_digest,
1277 	.export		= sahara_sha_export,
1278 	.import		= sahara_sha_import,
1279 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1280 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1281 	.halg.base	= {
1282 		.cra_name		= "sha256",
1283 		.cra_driver_name	= "sahara-sha256",
1284 		.cra_priority		= 300,
1285 		.cra_flags		= CRYPTO_ALG_ASYNC |
1286 						CRYPTO_ALG_NEED_FALLBACK,
1287 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1288 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1289 		.cra_alignmask		= 0,
1290 		.cra_module		= THIS_MODULE,
1291 		.cra_init		= sahara_sha_cra_init,
1292 	}
1293 },
1294 };
1295 
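/*
 * Completion interrupt: acknowledge it, record whether the descriptor chain
 * finished cleanly and wake up the thread waiting in sahara_aes_process()
 * or sahara_sha_process().
 */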
1296 static irqreturn_t sahara_irq_handler(int irq, void *data)
1297 {
1298 	struct sahara_dev *dev = data;
1299 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1300 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1301 
1302 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1303 		     SAHARA_REG_CMD);
1304 
1305 	sahara_decode_status(dev, stat);
1306 
1307 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1308 		return IRQ_NONE;
1309 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1310 		dev->error = 0;
1311 	} else {
1312 		sahara_decode_error(dev, err);
1313 		dev->error = -EINVAL;
1314 	}
1315 
1316 	complete(&dev->dma_completion);
1317 
1318 	return IRQ_HANDLED;
1319 }
1320 
1321 
1322 static int sahara_register_algs(struct sahara_dev *dev)
1323 {
1324 	int err;
1325 	unsigned int i, j, k, l;
1326 
1327 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1328 		err = crypto_register_skcipher(&aes_algs[i]);
1329 		if (err)
1330 			goto err_aes_algs;
1331 	}
1332 
1333 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1334 		err = crypto_register_ahash(&sha_v3_algs[k]);
1335 		if (err)
1336 			goto err_sha_v3_algs;
1337 	}
1338 
1339 	if (dev->version > SAHARA_VERSION_3)
1340 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1341 			err = crypto_register_ahash(&sha_v4_algs[l]);
1342 			if (err)
1343 				goto err_sha_v4_algs;
1344 		}
1345 
1346 	return 0;
1347 
1348 err_sha_v4_algs:
1349 	for (j = 0; j < l; j++)
1350 		crypto_unregister_ahash(&sha_v4_algs[j]);
1351 
1352 err_sha_v3_algs:
1353 	for (j = 0; j < k; j++)
1354 		crypto_unregister_ahash(&sha_v3_algs[j]);
1355 
1356 err_aes_algs:
1357 	for (j = 0; j < i; j++)
1358 		crypto_unregister_skcipher(&aes_algs[j]);
1359 
1360 	return err;
1361 }
1362 
1363 static void sahara_unregister_algs(struct sahara_dev *dev)
1364 {
1365 	unsigned int i;
1366 
1367 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1368 		crypto_unregister_skcipher(&aes_algs[i]);
1369 
1370 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1371 		crypto_unregister_ahash(&sha_v3_algs[i]);
1372 
1373 	if (dev->version > SAHARA_VERSION_3)
1374 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1375 			crypto_unregister_ahash(&sha_v4_algs[i]);
1376 }
1377 
1378 static const struct of_device_id sahara_dt_ids[] = {
1379 	{ .compatible = "fsl,imx53-sahara" },
1380 	{ .compatible = "fsl,imx27-sahara" },
1381 	{ /* sentinel */ }
1382 };
1383 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1384 
1385 static int sahara_probe(struct platform_device *pdev)
1386 {
1387 	struct sahara_dev *dev;
1388 	u32 version;
1389 	int irq;
1390 	int err;
1391 	int i;
1392 
1393 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1394 	if (!dev)
1395 		return -ENOMEM;
1396 
1397 	dev->device = &pdev->dev;
1398 	platform_set_drvdata(pdev, dev);
1399 
1400 	/* Get the base address */
1401 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1402 	if (IS_ERR(dev->regs_base))
1403 		return PTR_ERR(dev->regs_base);
1404 
1405 	/* Get the IRQ */
1406 	irq = platform_get_irq(pdev, 0);
1407 	if (irq < 0)
1408 		return irq;
1409 
1410 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1411 			       0, dev_name(&pdev->dev), dev);
1412 	if (err) {
1413 		dev_err(&pdev->dev, "failed to request irq\n");
1414 		return err;
1415 	}
1416 
1417 	/* clocks */
1418 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1419 	if (IS_ERR(dev->clk_ipg)) {
1420 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1421 		return PTR_ERR(dev->clk_ipg);
1422 	}
1423 
1424 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1425 	if (IS_ERR(dev->clk_ahb)) {
1426 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1427 		return PTR_ERR(dev->clk_ahb);
1428 	}
1429 
1430 	/* Allocate HW descriptors */
1431 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1432 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1433 			&dev->hw_phys_desc[0], GFP_KERNEL);
1434 	if (!dev->hw_desc[0]) {
1435 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1436 		return -ENOMEM;
1437 	}
1438 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1439 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1440 				sizeof(struct sahara_hw_desc);
1441 
1442 	/* Allocate space for iv and key */
1443 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1444 				&dev->key_phys_base, GFP_KERNEL);
1445 	if (!dev->key_base) {
1446 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1447 		return -ENOMEM;
1448 	}
1449 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1450 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1451 
1452 	/* Allocate space for context: largest digest + message length field */
1453 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1454 					SHA256_DIGEST_SIZE + 4,
1455 					&dev->context_phys_base, GFP_KERNEL);
1456 	if (!dev->context_base) {
1457 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1458 		return -ENOMEM;
1459 	}
1460 
1461 	/* Allocate space for HW links */
1462 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1463 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1464 			&dev->hw_phys_link[0], GFP_KERNEL);
1465 	if (!dev->hw_link[0]) {
1466 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1467 		return -ENOMEM;
1468 	}
1469 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1470 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1471 					sizeof(struct sahara_hw_link);
1472 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1473 	}
1474 
1475 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1476 
1477 	spin_lock_init(&dev->queue_spinlock);
1478 
1479 	dev_ptr = dev;
1480 
1481 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1482 	if (IS_ERR(dev->kthread)) {
1483 		return PTR_ERR(dev->kthread);
1484 	}
1485 
1486 	init_completion(&dev->dma_completion);
1487 
1488 	err = clk_prepare_enable(dev->clk_ipg);
1489 	if (err)
1490 		return err;
1491 	err = clk_prepare_enable(dev->clk_ahb);
1492 	if (err)
1493 		goto clk_ipg_disable;
1494 
1495 	version = sahara_read(dev, SAHARA_REG_VERSION);
1496 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1497 		if (version != SAHARA_VERSION_3)
1498 			err = -ENODEV;
1499 	} else if (of_device_is_compatible(pdev->dev.of_node,
1500 			"fsl,imx53-sahara")) {
1501 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1502 			err = -ENODEV;
1503 		version = (version >> 8) & 0xff;
1504 	}
1505 	if (err == -ENODEV) {
1506 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1507 				version);
1508 		goto err_algs;
1509 	}
1510 
1511 	dev->version = version;
1512 
1513 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1514 		     SAHARA_REG_CMD);
1515 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1516 			SAHARA_CONTROL_SET_MAXBURST(8) |
1517 			SAHARA_CONTROL_RNG_AUTORSD |
1518 			SAHARA_CONTROL_ENABLE_INT,
1519 			SAHARA_REG_CONTROL);
1520 
1521 	err = sahara_register_algs(dev);
1522 	if (err)
1523 		goto err_algs;
1524 
1525 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1526 
1527 	return 0;
1528 
1529 err_algs:
1530 	kthread_stop(dev->kthread);
1531 	dev_ptr = NULL;
1532 	clk_disable_unprepare(dev->clk_ahb);
1533 clk_ipg_disable:
1534 	clk_disable_unprepare(dev->clk_ipg);
1535 
1536 	return err;
1537 }
1538 
1539 static int sahara_remove(struct platform_device *pdev)
1540 {
1541 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1542 
1543 	kthread_stop(dev->kthread);
1544 
1545 	sahara_unregister_algs(dev);
1546 
1547 	clk_disable_unprepare(dev->clk_ipg);
1548 	clk_disable_unprepare(dev->clk_ahb);
1549 
1550 	dev_ptr = NULL;
1551 
1552 	return 0;
1553 }
1554 
1555 static struct platform_driver sahara_driver = {
1556 	.probe		= sahara_probe,
1557 	.remove		= sahara_remove,
1558 	.driver		= {
1559 		.name	= SAHARA_NAME,
1560 		.of_match_table = sahara_dt_ids,
1561 	},
1562 };
1563 
1564 module_platform_driver(sahara_driver);
1565 
1566 MODULE_LICENSE("GPL");
1567 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1568 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1569 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1570