/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

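/*
 * Derive an address width in bits from a DMA mask for the dmaengine
 * src/dst_addr_widths capability fields set up at registration. A full
 * 64-bit mask wraps to zero when incremented and is special-cased to
 * report 64; narrower masks are measured with fls64().
 */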
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = (_mask) + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);
}

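/*
 * Reclaim descriptors from a channel's complete list. Only descriptors
 * that the client has acknowledged (async_tx_test_ack()) may be freed;
 * anything not yet acked is left in place for a later cleanup pass.
 * This runs from the per-channel cleanup tasklet below.
 */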
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

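/*
 * Submit the next queued command of a descriptor to the CCP.
 * -EINPROGRESS and -EBUSY count as success: the command was accepted
 * (or backlogged, since CCP_CMD_MAY_BACKLOG is set) and its completion
 * will be reported through ccp_cmd_callback().
 */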
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

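/*
 * Retire the just-completed command of @desc, then walk the active list
 * until a descriptor with commands still pending is found. Cookies are
 * completed while holding the channel lock; client callbacks and
 * dependency processing are deliberately run after the lock is dropped.
 */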
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

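/*
 * Completion callback attached to every CCP passthrough command; this
 * drives the channel state machine. Finished descriptors are retired,
 * the next command is issued unless the channel is paused, and the
 * cleanup tasklet is scheduled to free acked descriptors. It is also
 * called directly with err == 0 to start or restart processing.
 */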
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

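/*
 * dmaengine ->tx_submit() hook: assign a cookie and queue the
 * descriptor on the channel's pending list. Nothing is started here;
 * execution begins when the client calls dma_async_issue_pending().
 */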
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	return kmem_cache_zalloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

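/*
 * Build a DMA descriptor that copies @src_sg to @dst_sg by walking both
 * scatterlists in lockstep and emitting one CCP passthrough command per
 * min(src, dst) contiguous chunk. Both scatterlists must already be
 * mapped for DMA (hence CCP_CMD_PASSTHRU_NO_DMA_MAP).
 */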
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

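/*
 * dmaengine ->device_prep_dma_memcpy() hook. The source and destination
 * are wrapped in single-entry scatterlists on the stack purely so that
 * ccp_create_desc() can be reused; only the DMA address and length
 * fields are consumed before this function returns.
 */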
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

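/*
 * dmaengine ->device_issue_pending() hook: splice the pending list onto
 * the active list and, if the channel was idle, kick off processing by
 * invoking the command callback with a success status.
 */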
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

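/*
 * dmaengine ->device_tx_status() hook. dma_cookie_status() only tracks
 * the most recently completed cookie, so when it reports completion the
 * complete list is searched for the descriptor's own status, which may
 * be DMA_ERROR.
 */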
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/* TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/* TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/* TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

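/*
 * Register the CCP with the dmaengine core as a provider of memcpy,
 * scatter-gather and interrupt capabilities, exposing one DMA channel
 * per command queue. The slab caches are given device-unique names so
 * that multiple CCP instances can coexist.
 */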
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}
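
/*
 * A rough sketch of client-side usage (illustrative only, not part of
 * this driver). The dst and src DMA addresses and len are assumed to
 * have been prepared by the caller:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	DMA_CTRL_ACK lets the cleanup tasklet free the descriptor once
 *	it has completed:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx) {
 *		dma_release_channel(chan);
 *		return -EIO;
 *	}
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		... handle the error ...
 *
 *	dma_release_channel(chan);
 */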