// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ  64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with the hardware behavior on i.MX6SL and i.MX6ULL.
 * These are the byte-reversed SHA-1/SHA-256 digests of the empty message,
 * matching the byte order produced by the hardware (see the digest flip
 * in dcp_sha_req_to_buf()).
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
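
/*
 * The layout above mirrors the hardware work-packet format, so a job is
 * handed to the engine by physical address alone. The next_cmd_addr field
 * suggests that packets can be chained, but this driver always submits a
 * single descriptor at a time and leaves next_cmd_addr at 0.
 */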

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
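
/*
 * aes_key holds the key in its first AES_KEYSIZE_128 bytes and the CBC IV
 * in the second AES_KEYSIZE_128 bytes (zeroed for ECB). With
 * MXS_DCP_CONTROL0_PAYLOAD_KEY set, the engine fetches the whole region
 * through the descriptor's payload pointer; see mxs_dcp_aes_block_crypt()
 * and mxs_dcp_run_aes().
 */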

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};
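
/*
 * Only two of the four hardware channels are used: channel 0 for hashing
 * and channel 2 for AES. All channels are enabled in probe(), but
 * channels 1 and 3 never receive work from this driver.
 */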

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can be only one instance of the MXS DCP due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
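
/*
 * Example control-word composition, as built by mxs_dcp_run_aes() for
 * the first block of a CBC encryption:
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *			 MXS_DCP_CONTROL0_INTERRUPT |
 *			 MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *			 MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *			 MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *			 MXS_DCP_CONTROL0_CIPHER_INIT;
 *	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *			 MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 */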

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		dma_err = -ETIMEDOUT;
		goto out;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		dma_err = -EINVAL;
	}

out:
	/* The descriptor must be unmapped on every path, even on error. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return dma_err;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/*
	 * Propagate the last processed block back into req->iv for CBC
	 * chaining: the last ciphertext block when encrypting, the last
	 * input block when decrypting.
	 */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
				AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
				AES_BLOCK_SIZE);
	}

	return ret;
}

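/*
 * Each DCP channel is drained by a dedicated kthread: requests are
 * queued from the crypto API entry points (mxs_dcp_aes_enqueue() and
 * dcp_sha_update_fx()) and the thread dequeues, runs and completes
 * them, sleeping whenever its queue is empty.
 */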
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

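/*
 * The DCP only implements AES-128. Keys of other lengths accepted by
 * setkey are handled by forwarding the whole request to the software
 * fallback tfm allocated in mxs_dcp_aes_fallback_init_tfm().
 */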
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware; store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by an in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
		    MXS_DCP_CONTROL0_INTERRUPT |
		    MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed; flip it back. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

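/*
 * The entry points below all funnel into dcp_sha_update_fx(): update()
 * passes fini=0, final() strips the data and passes fini=1, finup()
 * passes the trailing data with fini=1, and digest() is init() followed
 * by finup().
 */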
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

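/*
 * import/export shuttle the software request and transform contexts in
 * and out; the exported blob is exactly what .statesize
 * (sizeof(struct dcp_export_state)) advertises to the crypto API.
 */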
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

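/*
 * A minimal sketch of how a kernel consumer would reach the cbc(aes)
 * implementation above through the generic skcipher API (key, src_sg,
 * dst_sg, len and iv are caller-provided; error handling and cleanup
 * elided). With cra_priority 400, the DCP versions are preferred over
 * aes-generic whenever this driver has probed:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */
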
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import = dcp_sha_import,
	.export = dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import = dcp_sha_import,
	.export = dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
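
/*
 * Both ahash algorithms share the same callbacks and differ only in
 * digest size, block size and the HASH_SELECT value picked in
 * dcp_sha_init(). Since they are registered under the generic "sha1"
 * and "sha256" names, consumers (including AF_ALG users) pick them up
 * transparently whenever the DCP implementation has the highest
 * priority.
 */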

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");