/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

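/* Resolve the effective channel attribute for a device: a valid
 * dma_chan_attr module parameter overrides the per-device vdata
 * default; any other value logs a warning and falls back to the vdata
 * setting. For example, loading with "modprobe ccp dma_chan_attr=0x1"
 * would mark every channel as DMA_PRIVATE.
 */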
unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

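/* device_free_chan_resources callback: drop every descriptor still
 * held on the channel's complete, active, pending and created lists,
 * along with any commands they own.
 */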
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

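/* Reclaim completed descriptors from a list, newest first. Only
 * descriptors the client has acknowledged (async_tx_test_ack) are
 * freed; unacked ones are skipped and left for a later cleanup pass.
 */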
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

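/* Per-channel cleanup tasklet: free acknowledged descriptors from the
 * complete list. Scheduled from ccp_cmd_callback() after command
 * processing winds down.
 */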
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

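/* Move the descriptor's next pending command to its active list and
 * hand it to the CCP. -EINPROGRESS and -EBUSY still count as success
 * since the command was accepted (possibly backlogged).
 */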
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

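/* Retire the given descriptor onto the complete list (if non-NULL) and
 * return the next descriptor at the head of the active list, or NULL
 * if none remains. Called with the channel lock held.
 */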
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

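/* Advance the channel after a command finishes: free the completed
 * command and, if the descriptor still has pending commands and no
 * error, return it so the next command can be issued. Otherwise mark
 * the descriptor complete, invoke its dmaengine callback and
 * dependencies, and move on to the next active descriptor. Returns
 * NULL when no runnable descriptor remains.
 */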
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

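/* Splice all pending descriptors onto the tail of the active list.
 * Returns the first spliced descriptor if the active list was empty
 * (processing needs a kick), NULL otherwise. Called with the channel
 * lock held.
 */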
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

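/* Command completion callback passed to the CCP for every passthrough
 * command. -EINPROGRESS notifications are ignored; real errors mark
 * the owning descriptor DMA_ERROR. The loop then keeps completing
 * descriptors and issuing follow-on commands until the channel is
 * paused or out of work, and finally schedules the cleanup tasklet.
 */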
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

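/* dmaengine tx_submit callback: assign a cookie, then unlink the
 * descriptor (normally from the channel's created list) and append it
 * to the pending list. Nothing runs until device_issue_pending.
 */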
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

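/* Build a DMA descriptor covering a copy from src_sg to dst_sg: the
 * two scatterlists are walked in lockstep and each overlapping chunk
 * becomes one CCP passthrough (no-map) command on the descriptor's
 * pending list. The new descriptor is parked on the channel's created
 * list until it is submitted. Returns NULL on allocation failure or
 * if the scatterlists produce no work.
 */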
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

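/* device_prep_dma_memcpy callback: wrap the single src/dst pair in
 * one-entry scatterlists and let ccp_create_desc() build the
 * descriptor.
 */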
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

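/* device_issue_pending callback: promote pending descriptors to the
 * active list and, if the channel was idle, kick off processing via
 * ccp_cmd_callback().
 */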
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

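/* device_tx_status callback: report DMA_PAUSED while the channel is
 * paused; otherwise take the cookie status and, for DMA_COMPLETE,
 * refine it with the per-descriptor status (e.g. DMA_ERROR) if the
 * descriptor is still on the complete list.
 */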
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

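/* device_terminate_all callback: discard everything on the active,
 * pending and created lists; the complete list is left untouched here
 * (see the TODO below).
 */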
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

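/* Register the CCP as a dmaengine provider: allocate per-queue channel
 * state and slab caches for commands/descriptors, advertise MEMCPY, SG
 * and INTERRUPT capabilities (honoring the public/private channel
 * attribute), wire up the dma_device callbacks and register the device.
 */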
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}