/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
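
/*
 * Illustrative only (not part of the dmaengine core): a minimal sketch of the
 * opportunistic mem-to-mem client flow described in the header comment --
 * dmaengine_get() to register interest, dma_find_channel() to pick up a
 * public channel, and dmaengine_put() when done. The function name is
 * hypothetical, and @dest/@src are assumed to be DMA addresses already mapped
 * by the caller; real clients typically complete asynchronously rather than
 * with dma_sync_wait().
 */
static int __maybe_unused example_memcpy_client(dma_addr_t dest, dma_addr_t src,
						size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = 0;

	dmaengine_get();

	chan = dma_find_channel(DMA_MEMCPY);
	if (!chan) {
		/* no public memcpy channel is currently available */
		ret = -ENODEV;
		goto out;
	}

	/* the core validated this callback at registration time */
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dmaengine_put();
	return ret;
}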

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether the device reports the generic slave capabilities;
	 * if not, it does not support slave capability reporting at all.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;

	/*
	 * Some devices implement only pause (e.g. to get the residue) but no
	 * resume. However, cmd_pause is advertised as pause AND resume.
	 */
	caps->cmd_pause = !!(device->device_pause && device->device_resume);
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
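
/*
 * Illustrative only (not part of the dmaengine core): a minimal sketch of an
 * exclusive slave-channel client built on dma_request_chan(). The function
 * name, the channel name "tx", and the FIFO address are hypothetical; @buf is
 * assumed to be a DMA address already mapped towards the device by the
 * caller. Real clients normally complete via desc->callback instead of
 * polling with dma_sync_wait(); polling just keeps the sketch self-contained.
 */
static int __maybe_unused example_slave_client(struct device *dev,
					       dma_addr_t fifo_addr,
					       dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}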

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan)
		chan = ERR_PTR(-ENODEV);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
#endif
#endif

#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
#endif
#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
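
/*
 * Illustrative only (not part of the dmaengine core): a minimal sketch of how
 * a hardware driver might wire up its channels and register, assuming the
 * hypothetical caller has already filled in the mandatory callbacks
 * (device_prep_dma_memcpy for the DMA_MEMCPY capability set below,
 * device_tx_status, device_issue_pending) and ->dev, as validated by the
 * BUG_ON()s in dma_async_device_register() above. Cookie bookkeeping and
 * interrupt handling are driver-specific and not shown.
 */
static int __maybe_unused example_register_provider(struct dma_device *dd,
						    struct dma_chan *chans,
						    unsigned int nr_chans)
{
	unsigned int i;

	/* advertise the operations the hardware can offload */
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	/* the channels list is only ever set up before registration */
	INIT_LIST_HEAD(&dd->channels);
	for (i = 0; i < nr_chans; i++) {
		chans[i].device = dd;
		list_add_tail(&chans[i].device_node, &dd->channels);
	}

	return dma_async_device_register(dd);
}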

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
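
/*
 * Illustrative only (not part of the dmaengine core): a minimal sketch of how
 * a driver might use the unmap pool above for a single page-to-page copy.
 * The ordering of ->addr[] entries (to_cnt DMA_TO_DEVICE mappings first, then
 * from_cnt DMA_FROM_DEVICE, then bidi_cnt) is what dmaengine_unmap() relies
 * on. The function name is hypothetical, the pages and device come from the
 * caller, and the returned unmap data would normally be attached to a
 * descriptor with dma_set_unmap() before submission.
 */
static struct dmaengine_unmap_data *__maybe_unused
example_map_for_copy(struct device *dev, struct page *src, struct page *dst,
		     size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, unmap->addr[0]))
		goto err;
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, unmap->addr[1]))
		goto err;
	unmap->from_cnt = 1;

	return unmap;
err:
	/* drops the initial reference and unmaps whatever was mapped */
	dmaengine_unmap_put(unmap);
	return NULL;
}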