xref: /openbmc/linux/drivers/crypto/sahara.c (revision c660aa77)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN		PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME "sahara"
37 #define SAHARA_VERSION_3	3
38 #define SAHARA_VERSION_4	4
39 #define SAHARA_TIMEOUT_MS	1000
40 #define SAHARA_MAX_HW_DESC	2
41 #define SAHARA_MAX_HW_LINK	20
42 
43 #define FLAGS_MODE_MASK		0x000f
44 #define FLAGS_ENCRYPT		BIT(0)
45 #define FLAGS_CBC		BIT(1)
46 
47 #define SAHARA_HDR_BASE			0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES	0
49 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
50 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
51 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
52 #define SAHARA_HDR_FORM_DATA		(5 << 16)
53 #define SAHARA_HDR_FORM_KEY		(8 << 16)
54 #define SAHARA_HDR_LLO			(1 << 24)
55 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
56 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
57 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
61 #define SAHARA_HDR_MDHA_HASH		0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1	0
64 #define SAHARA_HDR_MDHA_ALG_MD5		1
65 #define SAHARA_HDR_MDHA_ALG_SHA256	2
66 #define SAHARA_HDR_MDHA_ALG_SHA224	3
67 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
68 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
69 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
70 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
71 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
72 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
73 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
74 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
75 
76 /* SAHARA can only process one request at a time */
77 #define SAHARA_QUEUE_LENGTH	1
78 
79 #define SAHARA_REG_VERSION	0x00
80 #define SAHARA_REG_DAR		0x04
81 #define SAHARA_REG_CONTROL	0x08
82 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
83 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
84 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
85 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
86 #define SAHARA_REG_CMD		0x0C
87 #define		SAHARA_CMD_RESET		(1 << 0)
88 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
89 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
90 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
91 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
92 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
93 #define	SAHARA_REG_STATUS	0x10
94 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
95 #define			SAHARA_STATE_IDLE	0
96 #define			SAHARA_STATE_BUSY	1
97 #define			SAHARA_STATE_ERR	2
98 #define			SAHARA_STATE_FAULT	3
99 #define			SAHARA_STATE_COMPLETE	4
100 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
101 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
102 #define		SAHARA_STATUS_ERROR		(1 << 4)
103 #define		SAHARA_STATUS_SECURE		(1 << 5)
104 #define		SAHARA_STATUS_FAIL		(1 << 6)
105 #define		SAHARA_STATUS_INIT		(1 << 7)
106 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
107 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
108 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
109 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
110 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
111 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
112 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
113 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
114 #define SAHARA_REG_ERRSTATUS	0x14
115 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
116 #define			SAHARA_ERRSOURCE_CHA	14
117 #define			SAHARA_ERRSOURCE_DMA	15
118 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
119 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
120 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
121 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
122 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
123 #define SAHARA_REG_FADDR	0x18
124 #define SAHARA_REG_CDAR		0x1C
125 #define SAHARA_REG_IDAR		0x20
126 
127 struct sahara_hw_desc {
128 	u32	hdr;
129 	u32	len1;
130 	u32	p1;
131 	u32	len2;
132 	u32	p2;
133 	u32	next;
134 };
135 
136 struct sahara_hw_link {
137 	u32	len;
138 	u32	p;
139 	u32	next;
140 };
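/*
 * Layout note (derived from the code below): the hardware walks a chain of
 * sahara_hw_desc entries, each carrying a header plus two length/pointer
 * pairs and the physical address of the next descriptor.  p1/p2 either
 * point directly at a DMA buffer (IV, key, context) or at a chain of
 * sahara_hw_link entries describing a scatterlist, as built in
 * sahara_hw_descriptor_create() and sahara_sha_hw_links_create().
 */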
141 
142 struct sahara_ctx {
143 	/* AES-specific context */
144 	int keylen;
145 	u8 key[AES_KEYSIZE_128];
146 	struct crypto_skcipher *fallback;
147 };
148 
149 struct sahara_aes_reqctx {
150 	unsigned long mode;
151 	u8 iv_out[AES_BLOCK_SIZE];
152 	struct skcipher_request fallback_req;	// keep at the end
153 };
154 
155 /*
156  * struct sahara_sha_reqctx - private data per request
157  * @buf: holds data for requests smaller than block_size
158  * @rembuf: used to prepare one block_size-aligned request
159  * @context: hw-specific context for request. Digest is extracted from this
160  * @mode: specifies what type of hw-descriptor needs to be built
161  * @digest_size: length of digest for this request
162  * @context_size: length of hw-context for this request.
163  *                Always digest_size + 4
164  * @buf_cnt: number of bytes saved in buf
165  * @sg_in_idx: number of hw links
166  * @in_sg: scatterlist for input data
167  * @in_sg_chain: scatterlists for chained input data
168  * @total: total number of bytes for transfer
169  * @last: is this the last block
170  * @first: is this the first block
171  * @active: inside a transfer
172  */
173 struct sahara_sha_reqctx {
174 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
175 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			context[SHA256_DIGEST_SIZE + 4];
177 	unsigned int		mode;
178 	unsigned int		digest_size;
179 	unsigned int		context_size;
180 	unsigned int		buf_cnt;
181 	unsigned int		sg_in_idx;
182 	struct scatterlist	*in_sg;
183 	struct scatterlist	in_sg_chain[2];
184 	size_t			total;
185 	unsigned int		last;
186 	unsigned int		first;
187 	unsigned int		active;
188 };
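/*
 * Note: the whole request context doubles as the export/import state -
 * sahara_sha_export()/sahara_sha_import() copy the structure verbatim,
 * and .halg.statesize is sizeof(struct sahara_sha_reqctx) accordingly.
 */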
189 
190 struct sahara_dev {
191 	struct device		*device;
192 	unsigned int		version;
193 	void __iomem		*regs_base;
194 	struct clk		*clk_ipg;
195 	struct clk		*clk_ahb;
196 	spinlock_t		queue_spinlock;
197 	struct task_struct	*kthread;
198 	struct completion	dma_completion;
199 
200 	struct sahara_ctx	*ctx;
201 	struct crypto_queue	queue;
202 	unsigned long		flags;
203 
204 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
205 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
206 
207 	u8			*key_base;
208 	dma_addr_t		key_phys_base;
209 
210 	u8			*iv_base;
211 	dma_addr_t		iv_phys_base;
212 
213 	u8			*context_base;
214 	dma_addr_t		context_phys_base;
215 
216 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
217 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
218 
219 	size_t			total;
220 	struct scatterlist	*in_sg;
221 	int		nb_in_sg;
222 	struct scatterlist	*out_sg;
223 	int		nb_out_sg;
224 
225 	u32			error;
226 };
227 
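/*
 * The driver handles a single SAHARA instance; the algorithm callbacks
 * reach the device through this file-scope pointer, which is set in
 * sahara_probe() and cleared again in sahara_remove().
 */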
228 static struct sahara_dev *dev_ptr;
229 
230 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
231 {
232 	writel(data, dev->regs_base + reg);
233 }
234 
235 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
236 {
237 	return readl(dev->regs_base + reg);
238 }
239 
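/*
 * The SKHA/MDHA descriptor headers carry a parity bit: the base value below
 * has an odd population count, and each optional mode bit that gets ORed in
 * toggles SAHARA_HDR_PARITY_BIT so that the count stays odd (the MDHA path
 * enforces the same invariant with the hweight_long() check in
 * sahara_sha_init_hdr()).
 */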
240 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
241 {
242 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
243 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
244 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
245 
246 	if (dev->flags & FLAGS_CBC) {
247 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
248 		hdr ^= SAHARA_HDR_PARITY_BIT;
249 	}
250 
251 	if (dev->flags & FLAGS_ENCRYPT) {
252 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
253 		hdr ^= SAHARA_HDR_PARITY_BIT;
254 	}
255 
256 	return hdr;
257 }
258 
259 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
260 {
261 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
262 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
263 }
264 
265 static const char *sahara_err_src[16] = {
266 	"No error",
267 	"Header error",
268 	"Descriptor length error",
269 	"Descriptor length or pointer error",
270 	"Link length error",
271 	"Link pointer error",
272 	"Input buffer error",
273 	"Output buffer error",
274 	"Output buffer starvation",
275 	"Internal state fault",
276 	"General descriptor problem",
277 	"Reserved",
278 	"Descriptor address error",
279 	"Link address error",
280 	"CHA error",
281 	"DMA error"
282 };
283 
284 static const char *sahara_err_dmasize[4] = {
285 	"Byte transfer",
286 	"Half-word transfer",
287 	"Word transfer",
288 	"Reserved"
289 };
290 
291 static const char *sahara_err_dmasrc[8] = {
292 	"No error",
293 	"AHB bus error",
294 	"Internal IP bus error",
295 	"Parity error",
296 	"DMA crosses 256 byte boundary",
297 	"DMA is busy",
298 	"Reserved",
299 	"DMA HW error"
300 };
301 
302 static const char *sahara_cha_errsrc[12] = {
303 	"Input buffer non-empty",
304 	"Illegal address",
305 	"Illegal mode",
306 	"Illegal data size",
307 	"Illegal key size",
308 	"Write during processing",
309 	"CTX read during processing",
310 	"HW error",
311 	"Input buffer disabled/underflow",
312 	"Output buffer disabled/overflow",
313 	"DES key parity error",
314 	"Reserved"
315 };
316 
317 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
318 
319 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
320 {
321 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
322 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
323 
324 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
325 
326 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
327 
328 	if (source == SAHARA_ERRSOURCE_DMA) {
329 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
330 			dev_err(dev->device, "		* DMA read.\n");
331 		else
332 			dev_err(dev->device, "		* DMA write.\n");
333 
334 		dev_err(dev->device, "		* %s.\n",
335 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
336 		dev_err(dev->device, "		* %s.\n",
337 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
338 	} else if (source == SAHARA_ERRSOURCE_CHA) {
339 		dev_err(dev->device, "		* %s.\n",
340 			sahara_cha_errsrc[chasrc]);
341 		dev_err(dev->device, "		* %s.\n",
342 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
343 	}
344 	dev_err(dev->device, "\n");
345 }
346 
347 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
348 
349 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
350 {
351 	u8 state;
352 
353 	if (!__is_defined(DEBUG))
354 		return;
355 
356 	state = SAHARA_STATUS_GET_STATE(status);
357 
358 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
359 		__func__, status);
360 
361 	dev_dbg(dev->device, "	- State = %d:\n", state);
362 	if (state & SAHARA_STATE_COMP_FLAG)
363 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
364 
365 	dev_dbg(dev->device, "		* %s.\n",
366 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
367 
368 	if (status & SAHARA_STATUS_DAR_FULL)
369 		dev_dbg(dev->device, "	- DAR Full.\n");
370 	if (status & SAHARA_STATUS_ERROR)
371 		dev_dbg(dev->device, "	- Error.\n");
372 	if (status & SAHARA_STATUS_SECURE)
373 		dev_dbg(dev->device, "	- Secure.\n");
374 	if (status & SAHARA_STATUS_FAIL)
375 		dev_dbg(dev->device, "	- Fail.\n");
376 	if (status & SAHARA_STATUS_RNG_RESEED)
377 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
378 	if (status & SAHARA_STATUS_ACTIVE_RNG)
379 		dev_dbg(dev->device, "	- RNG Active.\n");
380 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
381 		dev_dbg(dev->device, "	- MDHA Active.\n");
382 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
383 		dev_dbg(dev->device, "	- SKHA Active.\n");
384 
385 	if (status & SAHARA_STATUS_MODE_BATCH)
386 		dev_dbg(dev->device, "	- Batch Mode.\n");
387 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
388 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
389 	else if (status & SAHARA_STATUS_MODE_DEBUG)
390 		dev_dbg(dev->device, "	- Debug Mode.\n");
391 
392 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
393 	       SAHARA_STATUS_GET_ISTATE(status));
394 
395 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
396 		sahara_read(dev, SAHARA_REG_CDAR));
397 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
398 		sahara_read(dev, SAHARA_REG_IDAR));
399 }
400 
401 static void sahara_dump_descriptors(struct sahara_dev *dev)
402 {
403 	int i;
404 
405 	if (!__is_defined(DEBUG))
406 		return;
407 
408 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
409 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
410 			i, &dev->hw_phys_desc[i]);
411 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
412 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
413 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
414 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
415 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
416 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
417 			dev->hw_desc[i]->next);
418 	}
419 	dev_dbg(dev->device, "\n");
420 }
421 
422 static void sahara_dump_links(struct sahara_dev *dev)
423 {
424 	int i;
425 
426 	if (!__is_defined(DEBUG))
427 		return;
428 
429 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
430 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
431 			i, &dev->hw_phys_link[i]);
432 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
433 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
434 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
435 			dev->hw_link[i]->next);
436 	}
437 	dev_dbg(dev->device, "\n");
438 }
439 
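/*
 * Build the two-descriptor AES chain: descriptor 0 loads the key (and the
 * IV for CBC) into the SKHA unit, descriptor 1 points p1 at the chain of
 * input links and p2 at the chain of output links for the actual data.
 * Writing the physical address of descriptor 0 to SAHARA_REG_DAR kicks off
 * the transfer; completion is signalled through the IRQ handler.
 */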
440 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
441 {
442 	struct sahara_ctx *ctx = dev->ctx;
443 	struct scatterlist *sg;
444 	int ret;
445 	int i, j;
446 	int idx = 0;
447 	u32 len;
448 
449 	memcpy(dev->key_base, ctx->key, ctx->keylen);
450 
451 	if (dev->flags & FLAGS_CBC) {
452 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
453 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
454 	} else {
455 		dev->hw_desc[idx]->len1 = 0;
456 		dev->hw_desc[idx]->p1 = 0;
457 	}
458 	dev->hw_desc[idx]->len2 = ctx->keylen;
459 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
460 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
461 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
462 
463 	idx++;
464 
465 
466 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
467 	if (dev->nb_in_sg < 0) {
468 		dev_err(dev->device, "Invalid number of src SG.\n");
469 		return dev->nb_in_sg;
470 	}
471 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
472 	if (dev->nb_out_sg < 0) {
473 		dev_err(dev->device, "Invalid number of dst SG.\n");
474 		return dev->nb_out_sg;
475 	}
476 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
477 		dev_err(dev->device, "not enough hw links (%d)\n",
478 			dev->nb_in_sg + dev->nb_out_sg);
479 		return -EINVAL;
480 	}
481 
482 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
483 			 DMA_TO_DEVICE);
484 	if (!ret) {
485 		dev_err(dev->device, "couldn't map in sg\n");
486 		return -EINVAL;
487 	}
488 
489 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
490 			 DMA_FROM_DEVICE);
491 	if (!ret) {
492 		dev_err(dev->device, "couldn't map out sg\n");
493 		goto unmap_in;
494 	}
495 
496 	/* Create input links */
497 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
498 	sg = dev->in_sg;
499 	len = dev->total;
500 	for (i = 0; i < dev->nb_in_sg; i++) {
501 		dev->hw_link[i]->len = min(len, sg->length);
502 		dev->hw_link[i]->p = sg->dma_address;
503 		if (i == (dev->nb_in_sg - 1)) {
504 			dev->hw_link[i]->next = 0;
505 		} else {
506 			len -= min(len, sg->length);
507 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
508 			sg = sg_next(sg);
509 		}
510 	}
511 
512 	/* Create output links */
513 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
514 	sg = dev->out_sg;
515 	len = dev->total;
516 	for (j = i; j < dev->nb_out_sg + i; j++) {
517 		dev->hw_link[j]->len = min(len, sg->length);
518 		dev->hw_link[j]->p = sg->dma_address;
519 		if (j == (dev->nb_out_sg + i - 1)) {
520 			dev->hw_link[j]->next = 0;
521 		} else {
522 			len -= min(len, sg->length);
523 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
524 			sg = sg_next(sg);
525 		}
526 	}
527 
528 	/* Fill remaining fields of hw_desc[1] */
529 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
530 	dev->hw_desc[idx]->len1 = dev->total;
531 	dev->hw_desc[idx]->len2 = dev->total;
532 	dev->hw_desc[idx]->next = 0;
533 
534 	sahara_dump_descriptors(dev);
535 	sahara_dump_links(dev);
536 
537 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
538 
539 	return 0;
540 
541 unmap_in:
542 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
543 		DMA_TO_DEVICE);
544 
545 	return -EINVAL;
546 }
547 
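/*
 * For CBC the skcipher API expects req->iv to hold the last ciphertext
 * block on return.  On encryption it is copied from the tail of the
 * destination; on decryption it was saved from the source into
 * rctx->iv_out in sahara_aes_process() before the data was overwritten.
 */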
548 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
549 {
550 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
551 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
552 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
553 
554 	/* Update IV buffer to contain the last ciphertext block */
555 	if (rctx->mode & FLAGS_ENCRYPT) {
556 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
557 				   ivsize, req->cryptlen - ivsize);
558 	} else {
559 		memcpy(req->iv, rctx->iv_out, ivsize);
560 	}
561 }
562 
563 static int sahara_aes_process(struct skcipher_request *req)
564 {
565 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
566 	struct sahara_dev *dev = dev_ptr;
567 	struct sahara_ctx *ctx;
568 	struct sahara_aes_reqctx *rctx;
569 	int ret;
570 	unsigned long timeout;
571 
572 	/* Request is ready to be dispatched by the device */
573 	dev_dbg(dev->device,
574 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
575 		req->cryptlen, req->src, req->dst);
576 
577 	/* assign new request to device */
578 	dev->total = req->cryptlen;
579 	dev->in_sg = req->src;
580 	dev->out_sg = req->dst;
581 
582 	rctx = skcipher_request_ctx(req);
583 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
584 	rctx->mode &= FLAGS_MODE_MASK;
585 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
586 
587 	if ((dev->flags & FLAGS_CBC) && req->iv) {
588 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
589 
590 		memcpy(dev->iv_base, req->iv, ivsize);
591 
592 		if (!(dev->flags & FLAGS_ENCRYPT)) {
593 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
594 					   rctx->iv_out, ivsize,
595 					   req->cryptlen - ivsize);
596 		}
597 	}
598 
599 	/* assign new context to device */
600 	dev->ctx = ctx;
601 
602 	reinit_completion(&dev->dma_completion);
603 
604 	ret = sahara_hw_descriptor_create(dev);
605 	if (ret)
606 		return -EINVAL;
607 
608 	timeout = wait_for_completion_timeout(&dev->dma_completion,
609 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
610 
611 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
612 		DMA_FROM_DEVICE);
613 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
614 		DMA_TO_DEVICE);
615 
616 	if (!timeout) {
617 		dev_err(dev->device, "AES timeout\n");
618 		return -ETIMEDOUT;
619 	}
620 
621 	if ((dev->flags & FLAGS_CBC) && req->iv)
622 		sahara_aes_cbc_update_iv(req);
623 
624 	return 0;
625 }
626 
627 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
628 			     unsigned int keylen)
629 {
630 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
631 
632 	ctx->keylen = keylen;
633 
634 	/* SAHARA only supports 128-bit keys */
635 	if (keylen == AES_KEYSIZE_128) {
636 		memcpy(ctx->key, key, keylen);
637 		return 0;
638 	}
639 
640 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
641 		return -EINVAL;
642 
643 	/*
644 	 * The requested key size is not supported by the hardware, so fall back to software.
645 	 */
646 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
647 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
648 						 CRYPTO_TFM_REQ_MASK);
649 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
650 }
651 
652 static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
653 {
654 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
655 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
656 		crypto_skcipher_reqtfm(req));
657 
658 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
659 	skcipher_request_set_callback(&rctx->fallback_req,
660 				      req->base.flags,
661 				      req->base.complete,
662 				      req->base.data);
663 	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
664 				   req->dst, req->cryptlen, req->iv);
665 
666 	if (mode & FLAGS_ENCRYPT)
667 		return crypto_skcipher_encrypt(&rctx->fallback_req);
668 
669 	return crypto_skcipher_decrypt(&rctx->fallback_req);
670 }
671 
672 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
673 {
674 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
675 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
676 		crypto_skcipher_reqtfm(req));
677 	struct sahara_dev *dev = dev_ptr;
678 	int err = 0;
679 
680 	if (!req->cryptlen)
681 		return 0;
682 
683 	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
684 		return sahara_aes_fallback(req, mode);
685 
686 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
687 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
688 
689 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
690 		dev_err(dev->device,
691 			"request size is not an exact multiple of the AES block size\n");
692 		return -EINVAL;
693 	}
694 
695 	rctx->mode = mode;
696 
697 	spin_lock_bh(&dev->queue_spinlock);
698 	err = crypto_enqueue_request(&dev->queue, &req->base);
699 	spin_unlock_bh(&dev->queue_spinlock);
700 
701 	wake_up_process(dev->kthread);
702 
703 	return err;
704 }
705 
706 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
707 {
708 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
709 }
710 
711 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
712 {
713 	return sahara_aes_crypt(req, 0);
714 }
715 
716 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
717 {
718 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
719 }
720 
721 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
722 {
723 	return sahara_aes_crypt(req, FLAGS_CBC);
724 }
725 
726 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
727 {
728 	const char *name = crypto_tfm_alg_name(&tfm->base);
729 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
730 
731 	ctx->fallback = crypto_alloc_skcipher(name, 0,
732 					      CRYPTO_ALG_NEED_FALLBACK);
733 	if (IS_ERR(ctx->fallback)) {
734 		pr_err("Error allocating fallback algo %s\n", name);
735 		return PTR_ERR(ctx->fallback);
736 	}
737 
738 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
739 					 crypto_skcipher_reqsize(ctx->fallback));
740 
741 	return 0;
742 }
743 
744 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
745 {
746 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
747 
748 	crypto_free_skcipher(ctx->fallback);
749 }
750 
751 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
752 			      struct sahara_sha_reqctx *rctx)
753 {
754 	u32 hdr = 0;
755 
756 	hdr = rctx->mode;
757 
758 	if (rctx->first) {
759 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
760 		hdr |= SAHARA_HDR_MDHA_INIT;
761 	} else {
762 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
763 	}
764 
765 	if (rctx->last)
766 		hdr |= SAHARA_HDR_MDHA_PDATA;
767 
768 	if (hweight_long(hdr) % 2 == 0)
769 		hdr |= SAHARA_HDR_PARITY_BIT;
770 
771 	return hdr;
772 }
773 
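/*
 * Map rctx->in_sg and describe it with hardware links starting at
 * dev->hw_link[start].  Returns the index of the first unused link on
 * success (the caller chains the context link there) or a negative errno.
 */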
774 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
775 				       struct sahara_sha_reqctx *rctx,
776 				       int start)
777 {
778 	struct scatterlist *sg;
779 	unsigned int len;
780 	unsigned int i;
781 	int ret;
782 
783 	dev->in_sg = rctx->in_sg;
784 
785 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
786 	if (dev->nb_in_sg < 0) {
787 		dev_err(dev->device, "Invalid number of src SG.\n");
788 		return dev->nb_in_sg;
789 	}
790 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
791 		dev_err(dev->device, "not enough hw links (%d)\n",
792 			dev->nb_in_sg);
793 		return -EINVAL;
794 	}
795 
796 	sg = dev->in_sg;
797 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
798 	if (!ret)
799 		return -EFAULT;
800 
801 	len = rctx->total;
802 	for (i = start; i < dev->nb_in_sg + start; i++) {
803 		dev->hw_link[i]->len = min(len, sg->length);
804 		dev->hw_link[i]->p = sg->dma_address;
805 		if (i == (dev->nb_in_sg + start - 1)) {
806 			dev->hw_link[i]->next = 0;
807 		} else {
808 			len -= min(len, sg->length);
809 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
810 			sg = sg_next(sg);
811 		}
812 	}
813 
814 	return i;
815 }
816 
817 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
818 						struct sahara_sha_reqctx *rctx,
819 						struct ahash_request *req,
820 						int index)
821 {
822 	unsigned result_len;
823 	int i = index;
824 
825 	if (rctx->first)
826 		/* Create initial descriptor: #8 */
827 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
828 	else
829 		/* Create hash descriptor: #10. Must follow #6. */
830 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
831 
832 	dev->hw_desc[index]->len1 = rctx->total;
833 	if (dev->hw_desc[index]->len1 == 0) {
834 		/* if len1 is 0, p1 must be 0, too */
835 		dev->hw_desc[index]->p1 = 0;
836 		rctx->sg_in_idx = 0;
837 	} else {
838 		/* Create input links */
839 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
840 		i = sahara_sha_hw_links_create(dev, rctx, index);
841 
842 		rctx->sg_in_idx = index;
843 		if (i < 0)
844 			return i;
845 	}
846 
847 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
848 
849 	/* Save the context for the next operation */
850 	result_len = rctx->context_size;
851 	dev->hw_link[i]->p = dev->context_phys_base;
852 
853 	dev->hw_link[i]->len = result_len;
854 	dev->hw_desc[index]->len2 = result_len;
855 
856 	dev->hw_link[i]->next = 0;
857 
858 	return 0;
859 }
860 
861 /*
862  * Load descriptor aka #6
863  *
864  * To load a previously saved context back to the MDHA unit
865  *
866  * p1: Saved Context
867  * p2: NULL
868  *
869  */
870 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
871 						struct sahara_sha_reqctx *rctx,
872 						struct ahash_request *req,
873 						int index)
874 {
875 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
876 
877 	dev->hw_desc[index]->len1 = rctx->context_size;
878 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
879 	dev->hw_desc[index]->len2 = 0;
880 	dev->hw_desc[index]->p2 = 0;
881 
882 	dev->hw_link[index]->len = rctx->context_size;
883 	dev->hw_link[index]->p = dev->context_phys_base;
884 	dev->hw_link[index]->next = 0;
885 
886 	return 0;
887 }
888 
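/*
 * Gather the data for one hardware pass.  Returns 0 when everything fits
 * in rctx->buf and nothing needs to be sent yet, or -EINPROGRESS when
 * rctx->in_sg/rctx->total have been set up for the hardware (this is the
 * value sahara_sha_process() checks before building descriptors).
 */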
889 static int sahara_sha_prepare_request(struct ahash_request *req)
890 {
891 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
892 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
893 	unsigned int hash_later;
894 	unsigned int block_size;
895 	unsigned int len;
896 
897 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
898 
899 	/* append bytes from previous operation */
900 	len = rctx->buf_cnt + req->nbytes;
901 
902 	/* only the last transfer can be padded in hardware */
903 	if (!rctx->last && (len < block_size)) {
904 		/* too little data; save it for the next operation */
905 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
906 					 0, req->nbytes, 0);
907 		rctx->buf_cnt += req->nbytes;
908 
909 		return 0;
910 	}
911 
912 	/* add data from previous operation first */
913 	if (rctx->buf_cnt)
914 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
915 
916 	/* data must always be a multiple of block_size */
917 	hash_later = rctx->last ? 0 : len & (block_size - 1);
918 	if (hash_later) {
919 		unsigned int offset = req->nbytes - hash_later;
920 		/* Save remaining bytes for later use */
921 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
922 					hash_later, 0);
923 	}
924 
925 	rctx->total = len - hash_later;
926 	/* have data from previous operation and current */
927 	if (rctx->buf_cnt && req->nbytes) {
928 		sg_init_table(rctx->in_sg_chain, 2);
929 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
930 		sg_chain(rctx->in_sg_chain, 2, req->src);
931 		rctx->in_sg = rctx->in_sg_chain;
932 	/* only data from previous operation */
933 	} else if (rctx->buf_cnt) {
934 		rctx->in_sg = rctx->in_sg_chain;
935 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
936 	/* no data from previous operation */
937 	} else {
938 		rctx->in_sg = req->src;
939 	}
940 
941 	/* on next call, we only have the remaining data in the buffer */
942 	rctx->buf_cnt = hash_later;
943 
944 	return -EINPROGRESS;
945 }
946 
947 static int sahara_sha_process(struct ahash_request *req)
948 {
949 	struct sahara_dev *dev = dev_ptr;
950 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
951 	int ret;
952 	unsigned long timeout;
953 
954 	ret = sahara_sha_prepare_request(req);
955 	if (!ret)
956 		return ret;
957 
958 	if (rctx->first) {
959 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
960 		if (ret)
961 			return ret;
962 
963 		dev->hw_desc[0]->next = 0;
964 		rctx->first = 0;
965 	} else {
966 		memcpy(dev->context_base, rctx->context, rctx->context_size);
967 
968 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
969 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
970 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
971 		if (ret)
972 			return ret;
973 
974 		dev->hw_desc[1]->next = 0;
975 	}
976 
977 	sahara_dump_descriptors(dev);
978 	sahara_dump_links(dev);
979 
980 	reinit_completion(&dev->dma_completion);
981 
982 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
983 
984 	timeout = wait_for_completion_timeout(&dev->dma_completion,
985 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
986 
987 	if (rctx->sg_in_idx)
988 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
989 			     DMA_TO_DEVICE);
990 
991 	if (!timeout) {
992 		dev_err(dev->device, "SHA timeout\n");
993 		return -ETIMEDOUT;
994 	}
995 
996 	memcpy(rctx->context, dev->context_base, rctx->context_size);
997 
998 	if (req->result && rctx->last)
999 		memcpy(req->result, rctx->context, rctx->digest_size);
1000 
1001 	return 0;
1002 }
1003 
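/*
 * Dispatcher thread: requests are funnelled through dev->queue (depth
 * SAHARA_QUEUE_LENGTH, i.e. one at a time) and handed synchronously to
 * sahara_sha_process() or sahara_aes_process(); the thread sleeps whenever
 * the queue is empty.
 */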
1004 static int sahara_queue_manage(void *data)
1005 {
1006 	struct sahara_dev *dev = data;
1007 	struct crypto_async_request *async_req;
1008 	struct crypto_async_request *backlog;
1009 	int ret = 0;
1010 
1011 	do {
1012 		__set_current_state(TASK_INTERRUPTIBLE);
1013 
1014 		spin_lock_bh(&dev->queue_spinlock);
1015 		backlog = crypto_get_backlog(&dev->queue);
1016 		async_req = crypto_dequeue_request(&dev->queue);
1017 		spin_unlock_bh(&dev->queue_spinlock);
1018 
1019 		if (backlog)
1020 			crypto_request_complete(backlog, -EINPROGRESS);
1021 
1022 		if (async_req) {
1023 			if (crypto_tfm_alg_type(async_req->tfm) ==
1024 			    CRYPTO_ALG_TYPE_AHASH) {
1025 				struct ahash_request *req =
1026 					ahash_request_cast(async_req);
1027 
1028 				ret = sahara_sha_process(req);
1029 			} else {
1030 				struct skcipher_request *req =
1031 					skcipher_request_cast(async_req);
1032 
1033 				ret = sahara_aes_process(req);
1034 			}
1035 
1036 			crypto_request_complete(async_req, ret);
1037 
1038 			continue;
1039 		}
1040 
1041 		schedule();
1042 	} while (!kthread_should_stop());
1043 
1044 	return 0;
1045 }
1046 
1047 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1048 {
1049 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1050 	struct sahara_dev *dev = dev_ptr;
1051 	int ret;
1052 
1053 	if (!req->nbytes && !last)
1054 		return 0;
1055 
1056 	rctx->last = last;
1057 
1058 	if (!rctx->active) {
1059 		rctx->active = 1;
1060 		rctx->first = 1;
1061 	}
1062 
1063 	spin_lock_bh(&dev->queue_spinlock);
1064 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1065 	spin_unlock_bh(&dev->queue_spinlock);
1066 
1067 	wake_up_process(dev->kthread);
1068 
1069 	return ret;
1070 }
1071 
1072 static int sahara_sha_init(struct ahash_request *req)
1073 {
1074 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1075 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1076 
1077 	memset(rctx, 0, sizeof(*rctx));
1078 
1079 	switch (crypto_ahash_digestsize(tfm)) {
1080 	case SHA1_DIGEST_SIZE:
1081 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1082 		rctx->digest_size = SHA1_DIGEST_SIZE;
1083 		break;
1084 	case SHA256_DIGEST_SIZE:
1085 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1086 		rctx->digest_size = SHA256_DIGEST_SIZE;
1087 		break;
1088 	default:
1089 		return -EINVAL;
1090 	}
1091 
1092 	rctx->context_size = rctx->digest_size + 4;
1093 	rctx->active = 0;
1094 
1095 	return 0;
1096 }
1097 
1098 static int sahara_sha_update(struct ahash_request *req)
1099 {
1100 	return sahara_sha_enqueue(req, 0);
1101 }
1102 
1103 static int sahara_sha_final(struct ahash_request *req)
1104 {
1105 	req->nbytes = 0;
1106 	return sahara_sha_enqueue(req, 1);
1107 }
1108 
1109 static int sahara_sha_finup(struct ahash_request *req)
1110 {
1111 	return sahara_sha_enqueue(req, 1);
1112 }
1113 
1114 static int sahara_sha_digest(struct ahash_request *req)
1115 {
1116 	sahara_sha_init(req);
1117 
1118 	return sahara_sha_finup(req);
1119 }
1120 
1121 static int sahara_sha_export(struct ahash_request *req, void *out)
1122 {
1123 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1124 
1125 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1126 
1127 	return 0;
1128 }
1129 
1130 static int sahara_sha_import(struct ahash_request *req, const void *in)
1131 {
1132 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1133 
1134 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1135 
1136 	return 0;
1137 }
1138 
1139 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1140 {
1141 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1142 				 sizeof(struct sahara_sha_reqctx));
1143 
1144 	return 0;
1145 }
1146 
1147 static struct skcipher_alg aes_algs[] = {
1148 {
1149 	.base.cra_name		= "ecb(aes)",
1150 	.base.cra_driver_name	= "sahara-ecb-aes",
1151 	.base.cra_priority	= 300,
1152 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1153 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1154 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1155 	.base.cra_alignmask	= 0x0,
1156 	.base.cra_module	= THIS_MODULE,
1157 
1158 	.init			= sahara_aes_init_tfm,
1159 	.exit			= sahara_aes_exit_tfm,
1160 	.min_keysize		= AES_MIN_KEY_SIZE,
1161 	.max_keysize		= AES_MAX_KEY_SIZE,
1162 	.setkey			= sahara_aes_setkey,
1163 	.encrypt		= sahara_aes_ecb_encrypt,
1164 	.decrypt		= sahara_aes_ecb_decrypt,
1165 }, {
1166 	.base.cra_name		= "cbc(aes)",
1167 	.base.cra_driver_name	= "sahara-cbc-aes",
1168 	.base.cra_priority	= 300,
1169 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1170 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1171 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1172 	.base.cra_alignmask	= 0x0,
1173 	.base.cra_module	= THIS_MODULE,
1174 
1175 	.init			= sahara_aes_init_tfm,
1176 	.exit			= sahara_aes_exit_tfm,
1177 	.min_keysize		= AES_MIN_KEY_SIZE,
1178 	.max_keysize		= AES_MAX_KEY_SIZE,
1179 	.ivsize			= AES_BLOCK_SIZE,
1180 	.setkey			= sahara_aes_setkey,
1181 	.encrypt		= sahara_aes_cbc_encrypt,
1182 	.decrypt		= sahara_aes_cbc_decrypt,
1183 }
1184 };
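/*
 * Illustrative sketch (not part of this driver): once these algorithms are
 * registered, a kernel user reaches them through the generic skcipher API,
 * for example:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * Request setup and error handling are omitted; "tfm" and "key" are just
 * example names.
 */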
1185 
1186 static struct ahash_alg sha_v3_algs[] = {
1187 {
1188 	.init		= sahara_sha_init,
1189 	.update		= sahara_sha_update,
1190 	.final		= sahara_sha_final,
1191 	.finup		= sahara_sha_finup,
1192 	.digest		= sahara_sha_digest,
1193 	.export		= sahara_sha_export,
1194 	.import		= sahara_sha_import,
1195 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1196 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1197 	.halg.base	= {
1198 		.cra_name		= "sha1",
1199 		.cra_driver_name	= "sahara-sha1",
1200 		.cra_priority		= 300,
1201 		.cra_flags		= CRYPTO_ALG_ASYNC |
1202 						CRYPTO_ALG_NEED_FALLBACK,
1203 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1204 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1205 		.cra_alignmask		= 0,
1206 		.cra_module		= THIS_MODULE,
1207 		.cra_init		= sahara_sha_cra_init,
1208 	}
1209 },
1210 };
1211 
1212 static struct ahash_alg sha_v4_algs[] = {
1213 {
1214 	.init		= sahara_sha_init,
1215 	.update		= sahara_sha_update,
1216 	.final		= sahara_sha_final,
1217 	.finup		= sahara_sha_finup,
1218 	.digest		= sahara_sha_digest,
1219 	.export		= sahara_sha_export,
1220 	.import		= sahara_sha_import,
1221 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1222 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1223 	.halg.base	= {
1224 		.cra_name		= "sha256",
1225 		.cra_driver_name	= "sahara-sha256",
1226 		.cra_priority		= 300,
1227 		.cra_flags		= CRYPTO_ALG_ASYNC |
1228 						CRYPTO_ALG_NEED_FALLBACK,
1229 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1230 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1231 		.cra_alignmask		= 0,
1232 		.cra_module		= THIS_MODULE,
1233 		.cra_init		= sahara_sha_cra_init,
1234 	}
1235 },
1236 };
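/*
 * sha_v3_algs (SHA-1) is registered on every supported SAHARA version;
 * sha_v4_algs (SHA-256) is registered only when the block reports a
 * version greater than SAHARA_VERSION_3, see sahara_register_algs().
 */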
1237 
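/*
 * IRQ handler: acknowledge and clear the interrupt, decode the status,
 * record the outcome in dev->error (0 on SAHARA_STATE_COMPLETE, -EINVAL
 * otherwise) and wake the waiter through dev->dma_completion.  A busy
 * state is treated as a spurious interrupt and returns IRQ_NONE.
 */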
1238 static irqreturn_t sahara_irq_handler(int irq, void *data)
1239 {
1240 	struct sahara_dev *dev = data;
1241 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1242 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1243 
1244 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1245 		     SAHARA_REG_CMD);
1246 
1247 	sahara_decode_status(dev, stat);
1248 
1249 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1250 		return IRQ_NONE;
1251 	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1252 		dev->error = 0;
1253 	} else {
1254 		sahara_decode_error(dev, err);
1255 		dev->error = -EINVAL;
1256 	}
1257 
1258 	complete(&dev->dma_completion);
1259 
1260 	return IRQ_HANDLED;
1261 }
1262 
1263 
1264 static int sahara_register_algs(struct sahara_dev *dev)
1265 {
1266 	int err;
1267 	unsigned int i, j, k, l;
1268 
1269 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1270 		err = crypto_register_skcipher(&aes_algs[i]);
1271 		if (err)
1272 			goto err_aes_algs;
1273 	}
1274 
1275 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1276 		err = crypto_register_ahash(&sha_v3_algs[k]);
1277 		if (err)
1278 			goto err_sha_v3_algs;
1279 	}
1280 
1281 	if (dev->version > SAHARA_VERSION_3)
1282 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1283 			err = crypto_register_ahash(&sha_v4_algs[l]);
1284 			if (err)
1285 				goto err_sha_v4_algs;
1286 		}
1287 
1288 	return 0;
1289 
1290 err_sha_v4_algs:
1291 	for (j = 0; j < l; j++)
1292 		crypto_unregister_ahash(&sha_v4_algs[j]);
1293 
1294 err_sha_v3_algs:
1295 	for (j = 0; j < k; j++)
1296 		crypto_unregister_ahash(&sha_v3_algs[j]);
1297 
1298 err_aes_algs:
1299 	for (j = 0; j < i; j++)
1300 		crypto_unregister_skcipher(&aes_algs[j]);
1301 
1302 	return err;
1303 }
1304 
1305 static void sahara_unregister_algs(struct sahara_dev *dev)
1306 {
1307 	unsigned int i;
1308 
1309 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1310 		crypto_unregister_skcipher(&aes_algs[i]);
1311 
1312 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1313 		crypto_unregister_ahash(&sha_v3_algs[i]);
1314 
1315 	if (dev->version > SAHARA_VERSION_3)
1316 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1317 			crypto_unregister_ahash(&sha_v4_algs[i]);
1318 }
1319 
1320 static const struct of_device_id sahara_dt_ids[] = {
1321 	{ .compatible = "fsl,imx53-sahara" },
1322 	{ .compatible = "fsl,imx27-sahara" },
1323 	{ /* sentinel */ }
1324 };
1325 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1326 
1327 static int sahara_probe(struct platform_device *pdev)
1328 {
1329 	struct sahara_dev *dev;
1330 	u32 version;
1331 	int irq;
1332 	int err;
1333 	int i;
1334 
1335 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1336 	if (!dev)
1337 		return -ENOMEM;
1338 
1339 	dev->device = &pdev->dev;
1340 	platform_set_drvdata(pdev, dev);
1341 
1342 	/* Get the base address */
1343 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1344 	if (IS_ERR(dev->regs_base))
1345 		return PTR_ERR(dev->regs_base);
1346 
1347 	/* Get the IRQ */
1348 	irq = platform_get_irq(pdev, 0);
1349 	if (irq < 0)
1350 		return irq;
1351 
1352 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1353 			       0, dev_name(&pdev->dev), dev);
1354 	if (err) {
1355 		dev_err(&pdev->dev, "failed to request irq\n");
1356 		return err;
1357 	}
1358 
1359 	/* clocks */
1360 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1361 	if (IS_ERR(dev->clk_ipg)) {
1362 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1363 		return PTR_ERR(dev->clk_ipg);
1364 	}
1365 
1366 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1367 	if (IS_ERR(dev->clk_ahb)) {
1368 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1369 		return PTR_ERR(dev->clk_ahb);
1370 	}
1371 
1372 	/* Allocate HW descriptors */
1373 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1374 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1375 			&dev->hw_phys_desc[0], GFP_KERNEL);
1376 	if (!dev->hw_desc[0]) {
1377 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1378 		return -ENOMEM;
1379 	}
1380 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1381 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1382 				sizeof(struct sahara_hw_desc);
1383 
1384 	/* Allocate space for iv and key */
1385 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1386 				&dev->key_phys_base, GFP_KERNEL);
1387 	if (!dev->key_base) {
1388 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1389 		return -ENOMEM;
1390 	}
1391 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1392 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1393 
1394 	/* Allocate space for context: largest digest + message length field */
1395 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1396 					SHA256_DIGEST_SIZE + 4,
1397 					&dev->context_phys_base, GFP_KERNEL);
1398 	if (!dev->context_base) {
1399 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1400 		return -ENOMEM;
1401 	}
1402 
1403 	/* Allocate space for HW links */
1404 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1405 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1406 			&dev->hw_phys_link[0], GFP_KERNEL);
1407 	if (!dev->hw_link[0]) {
1408 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1409 		return -ENOMEM;
1410 	}
1411 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1412 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1413 					sizeof(struct sahara_hw_link);
1414 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1415 	}
1416 
1417 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1418 
1419 	spin_lock_init(&dev->queue_spinlock);
1420 
1421 	dev_ptr = dev;
1422 
1423 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1424 	if (IS_ERR(dev->kthread)) {
1425 		return PTR_ERR(dev->kthread);
1426 	}
1427 
1428 	init_completion(&dev->dma_completion);
1429 
1430 	err = clk_prepare_enable(dev->clk_ipg);
1431 	if (err)
1432 		return err;
1433 	err = clk_prepare_enable(dev->clk_ahb);
1434 	if (err)
1435 		goto clk_ipg_disable;
1436 
1437 	version = sahara_read(dev, SAHARA_REG_VERSION);
1438 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1439 		if (version != SAHARA_VERSION_3)
1440 			err = -ENODEV;
1441 	} else if (of_device_is_compatible(pdev->dev.of_node,
1442 			"fsl,imx53-sahara")) {
1443 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1444 			err = -ENODEV;
1445 		version = (version >> 8) & 0xff;
1446 	}
1447 	if (err == -ENODEV) {
1448 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1449 				version);
1450 		goto err_algs;
1451 	}
1452 
1453 	dev->version = version;
1454 
1455 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1456 		     SAHARA_REG_CMD);
1457 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1458 			SAHARA_CONTROL_SET_MAXBURST(8) |
1459 			SAHARA_CONTROL_RNG_AUTORSD |
1460 			SAHARA_CONTROL_ENABLE_INT,
1461 			SAHARA_REG_CONTROL);
1462 
1463 	err = sahara_register_algs(dev);
1464 	if (err)
1465 		goto err_algs;
1466 
1467 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1468 
1469 	return 0;
1470 
1471 err_algs:
1472 	kthread_stop(dev->kthread);
1473 	dev_ptr = NULL;
1474 	clk_disable_unprepare(dev->clk_ahb);
1475 clk_ipg_disable:
1476 	clk_disable_unprepare(dev->clk_ipg);
1477 
1478 	return err;
1479 }
1480 
1481 static int sahara_remove(struct platform_device *pdev)
1482 {
1483 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1484 
1485 	kthread_stop(dev->kthread);
1486 
1487 	sahara_unregister_algs(dev);
1488 
1489 	clk_disable_unprepare(dev->clk_ipg);
1490 	clk_disable_unprepare(dev->clk_ahb);
1491 
1492 	dev_ptr = NULL;
1493 
1494 	return 0;
1495 }
1496 
1497 static struct platform_driver sahara_driver = {
1498 	.probe		= sahara_probe,
1499 	.remove		= sahara_remove,
1500 	.driver		= {
1501 		.name	= SAHARA_NAME,
1502 		.of_match_table = sahara_dt_ids,
1503 	},
1504 };
1505 
1506 module_platform_driver(sahara_driver);
1507 
1508 MODULE_LICENSE("GPL");
1509 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1510 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1511 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1512