xref: /openbmc/linux/drivers/crypto/mxs-dcp.c (revision b34e08d5)
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

#define DCP_ALIGNMENT	64

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
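
/*
 * Descriptors can be chained through next_cmd_addr, but this driver always
 * submits a single descriptor per operation and leaves the field zero.
 */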

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	struct mutex			mutex[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

/*
 * There can be only one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
static DEFINE_MUTEX(global_mutex);

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

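/*
 * Kick a DCP channel and wait for its completion interrupt: clear the
 * channel status, point CH_N_CMDPTR at the DMA-mapped descriptor and
 * bump the channel semaphore, which starts the engine.
 */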
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	int ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		ret = -EINVAL;
		goto err_unmap;
	}

	ret = 0;

err_unmap:
	/* Unmap the descriptor on all paths so the mapping is never leaked. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return ret;
}

/*
 * Encryption (AES128)
 */
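/*
 * The PAYLOAD_KEY payload points at the aes_key bounce buffer, which packs
 * the 128-bit key into bytes 0..15 and, for CBC with CIPHER_INIT set, the
 * IV into bytes 16..31 (filled in by mxs_dcp_aes_block_crypt() below).
 */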
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				/*
				 * Scatter the bounce buffer back into the
				 * destination SG list. 'split' is set when
				 * we stopped part-way through a dst entry
				 * and must resume there on the next pass.
				 */
				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}

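/*
 * Per-channel worker: mxs_dcp_aes_enqueue() pushes requests onto
 * sdcp->queue[chan] and wakes this kthread, which drains them one at a
 * time. Backlogged requests are notified with -EINPROGRESS first.
 */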
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	/* Propagate the queue status (-EINPROGRESS, or -EBUSY if backlogged). */
	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by the kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

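/*
 * Example: driving the "cbc(aes)" implementation above through the
 * (pre-skcipher) ablkcipher API. Illustrative sketch only, not part of
 * the driver: my_complete(), my_ctx, buf and len are placeholders, error
 * handling is omitted, and len must be a multiple of AES_BLOCK_SIZE.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	u8 key[AES_KEYSIZE_128], iv[AES_BLOCK_SIZE];
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, sizeof(key));
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, my_ctx);
 *	sg_init_one(&sg, buf, len);
 *	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * The encrypt call returns -EINPROGRESS and my_complete() later fires from
 * the DCP kthread once mxs_dcp_aes_block_crypt() has finished.
 */
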
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, req->result,
					     halg->digestsize, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
				 DMA_FROM_DEVICE);

	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize / 2; i++) {
			swap(req->result[i],
			     req->result[halg->digestsize - i - 1]);
		}
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	/* Propagate the queue status (-EINPROGRESS, or -EBUSY if backlogged). */
	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt,
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

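/*
 * Example: computing a digest with the ahash implementations above. Again
 * an illustrative sketch only; data, len and the completion glue are
 * placeholders.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);
 *
 * The request is queued on the DCP SHA channel and completes
 * asynchronously from dcp_chan_thread_sha().
 */
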
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	mutex_lock(&global_mutex);
	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		ret = -ENODEV;
		goto err_mutex;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		ret = dcp_vmi_irq;
		goto err_mutex;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		ret = dcp_irq;
		goto err_mutex;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base)) {
		ret = PTR_ERR(sdcp->base);
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		goto err_mutex;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				   GFP_KERNEL);
	if (!sdcp->coh) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		goto err_mutex;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_mutex;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	/* Success: drop the single-instance lock before returning. */
	mutex_unlock(&global_mutex);

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_mutex:
	mutex_unlock(&global_mutex);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	mutex_lock(&global_mutex);
	global_sdcp = NULL;
	mutex_unlock(&global_mutex);

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

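/*
 * Illustrative device-tree node; the register window and interrupt numbers
 * are placeholders, the authoritative values live in the SoC .dtsi:
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <...>;	(VMI IRQ first, then DCP IRQ)
 *	};
 */
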
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.owner		= THIS_MODULE,
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");