/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

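/* Tasklet callback: release any descriptors on the channel's complete
 * list that the client has already acknowledged.
 */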
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

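/* Move all submitted (pending) descriptors onto the active list.
 * Returns the first descriptor that was moved if the active list was
 * empty beforehand (i.e. the caller must kick off processing),
 * otherwise NULL. Called with the channel lock held.
 */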
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
	       ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
	       : NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

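/* Build a DMA descriptor for a scatter-gather copy: walk the source and
 * destination scatterlists in parallel, queue one no-map passthrough
 * command per contiguous chunk, then place the descriptor on the
 * channel's created list.
 */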
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

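/* Report the status of a cookie; for completed descriptors still on the
 * channel's complete list, return the status recorded on the descriptor
 * so that a DMA error is not reported as DMA_COMPLETE.
 */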
static enum dma_status
ccp_tx_status(struct dma_chan *dma_chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

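/* Abort all outstanding work on the channel: drop every descriptor on
 * the created, pending and active lists. Descriptors that have already
 * completed are left on the complete list.
 */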
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

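	/* Describe the DMA capabilities exported to clients: memory-to-memory
	 * copies with descriptor-granularity residue reporting.
	 */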
	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 *   dma_chan_attr=0x1: all channels private (override vdata)
	 *   dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}