// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}
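
/*
 * Illustrative only (not from the driver): the summary file is built from
 * the format strings in dmaengine_dbg_summary_show() above and
 * dmaengine_summary_show() below, so /sys/kernel/debug/dmaengine/summary
 * reads roughly like the following; all device, client and router names
 * here are made up:
 *
 *	dma0 (e7010000.dma-controller): number of channels: 8
 *	 dma0chan0    | in-use
 *	 dma0chan1    | 4d0000.mmc:rx-tx (via router: ti-dma-xbar)
 */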

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* DEBUG_FS */

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as
 *	the cpu
 * @chan: channel to test
 * @cpu: cpu index to test the channel against
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as
 *	the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
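
/*
 * Illustrative sketch (not part of this file) of how a built-in client of
 * the general-purpose channel pool uses the calls above; error handling is
 * elided and the descriptor preparation step depends on the client:
 *
 *	dmaengine_get();			   <- opt in as a client
 *	chan = dma_find_channel(DMA_MEMCPY);	   <- per-cpu lookup, may be NULL
 *	if (chan) {
 *		... prepare and submit descriptors on chan ...
 *		dma_async_issue_pending(chan);
 *	}
 *	...
 *	dmaengine_put();			   <- drop the client reference
 */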

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports that it uses the generic slave
	 * capabilities; if not, it does not support any kind of
	 * slave capability reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator.
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
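
/*
 * Illustrative sketch of the data dma_filter_match() consumes: a controller
 * driver that wants dma_request_chan() to match by name on a non-DT,
 * non-ACPI platform fills in the dma_device filter fields before
 * registering.  All names and request values below are made up:
 *
 *	static const struct dma_slave_map foo_dma_map[] = {
 *		{ "foo-uart.0", "rx", (void *)FOO_DMA_RX_REQ },
 *		{ "foo-uart.0", "tx", (void *)FOO_DMA_TX_REQ },
 *	};
 *
 *	dma_dev->filter.map = foo_dma_map;
 *	dma_dev->filter.mapcnt = ARRAY_SIZE(foo_dma_map);
 *	dma_dev->filter.fn = foo_dma_filter_fn;	   <- driver's dma_filter_fn
 */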

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR_OR_NULL(chan))
		return chan ? chan : ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
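
/*
 * Illustrative client-side sketch (not part of this file) of the usual
 * slave channel life cycle built on the request helpers above; the "rx"
 * channel name, buffer details and foo_dma_done() callback are made up,
 * and error handling is elided:
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	dmaengine_slave_config(chan, &cfg);	   <- struct dma_slave_config
 *	desc = dmaengine_prep_slave_single(chan, dma_addr, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = foo_dma_done;		   <- client completion hook
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */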

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting, we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
#endif

#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
						struct dma_chan *chan)
{
	int rc = 0;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local) {
		rc = -ENOMEM;
		goto err_out;
	}
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		free_percpu(chan->local);
		chan->local = NULL;
		rc = -ENOMEM;
		goto err_out;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are statically enumerating.
	 */
	mutex_lock(&device->chan_mutex);
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	mutex_unlock(&device->chan_mutex);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_out;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

 err_out_ida:
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
 err_out:
	free_percpu(chan->local);
	kfree(chan->dev);
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	list_del(&chan->device_node);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	mutex_init(&device->chan_mutex);
	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the ida */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);
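
/*
 * Illustrative provider-side sketch (not part of this file) of the minimum a
 * controller driver fills in before calling dma_async_device_register(),
 * matching the validation above; the foo_* callbacks are made up:
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->device_release = foo_release;	   <- needed for safe unbind
 *
 *	INIT_LIST_HEAD(&dd->channels);
 *	list_add_tail(&chan->device_node, &dd->channels);
 *	ret = dma_async_device_register(dd);
 */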

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
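
/*
 * Illustrative sketch of client-managed (DESC_METADATA_CLIENT) metadata
 * usage with the helpers above; see
 * Documentation/driver-api/dmaengine/client.rst.  Buffer names are made up
 * and error handling is elided:
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */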

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);