/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel().  Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
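/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * typical general-purpose client flow described above, assuming a
 * memcpy-capable channel is present.  'src', 'dest' and 'len' are
 * hypothetical.
 *
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	dmaengine_get();			// take a client reference
 *	chan = dma_find_channel(DMA_MEMCPY);	// per-cpu lookup, may be NULL
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *		dma_async_issue_pending(chan);
 *		// ... later, poll the cookie or dma_sync_wait(chan, cookie)
 *	}
 *	dmaengine_put();			// drop the client reference
 *
 * Exclusive users call dma_request_channel()/dma_release_channel() instead.
 */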
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
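/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * capability mask is built with the dma_cap_zero()/dma_cap_set() helpers
 * from <linux/dmaengine.h> and then compared against a device's cap_mask,
 * e.g. by dma_device_satisfies_mask() above or dma_request_channel() below.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	// a device satisfies 'mask' only if it advertises both capabilities
 */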
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
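/*
 * Illustrative sketch (not part of the original file, not compiled): waiting
 * synchronously on a cookie returned by an earlier submission.  'chan' and
 * 'cookie' are hypothetical.  Note that dma_sync_wait() issues pending
 * descriptors itself and gives up with DMA_ERROR after roughly five seconds.
 *
 *	enum dma_status status;
 *
 *	status = dma_sync_wait(chan, cookie);
 *	if (status != DMA_SUCCESS)
 *		// handle a failed or timed-out transfer
 */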
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
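/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * client that queues several copies, possibly from different contexts, and
 * flushes them from one place with dma_issue_pending_all() rather than
 * issuing per channel.  The buffers are hypothetical.
 *
 *	// while holding a dmaengine_get() reference:
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	// ... more submissions ...
 *	dma_issue_pending_all();	// kick every public channel in use
 */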
/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn,
				       void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan->private = NULL;
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
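/*
 * Illustrative sketch (not part of the original file, not compiled): an
 * exclusive user supplies a capability mask and an optional filter, and then
 * owns the channel until dma_release_channel().  my_filter() and my_match
 * are hypothetical names.
 *
 *	static bool my_filter(struct dma_chan *chan, void *fn_param)
 *	{
 *		return chan->device->dev == fn_param;	// pick one device
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match);
 *	if (chan) {
 *		// program and use the channel exclusively
 *		dma_release_channel(chan);
 *	}
 */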
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	chan->private = NULL;
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
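/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * driver-side half of registration.  The my_*() callbacks, my_priv and
 * 'pdev' are hypothetical; the mandatory hooks mirror the BUG_ON() checks
 * above.
 *
 *	struct dma_device *dma = &my_priv->dma;
 *	struct dma_chan *chan = &my_priv->chan;
 *
 *	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 *	dma->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dma->device_free_chan_resources = my_free_chan_resources;
 *	dma->device_prep_dma_memcpy = my_prep_memcpy;
 *	dma->device_is_tx_complete = my_is_tx_complete;
 *	dma->device_issue_pending = my_issue_pending;
 *	dma->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&dma->channels);
 *	chan->device = dma;
 *	list_add_tail(&chan->device_node, &dma->channels);
 *
 *	return dma_async_device_register(dma);
 */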
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
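/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * caller of dma_async_memcpy_buf_to_buf() checks the returned cookie and
 * falls back to a cpu copy if no descriptor was available.  'chan', 'dest',
 * 'src' and 'len' are hypothetical.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0) {
 *		memcpy(dest, src, len);		// offload failed (-ENOMEM)
 *	} else {
 *		dma_async_issue_pending(chan);
 *		// completion is checked later via the cookie
 *	}
 */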
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
			  unsigned int dest_off, struct page *src_pg,
			  unsigned int src_off, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	tx->next = NULL;
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
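/*
 * Illustrative sketch (not part of the original file, not compiled): a dma
 * driver typically calls dma_run_dependencies() from its descriptor cleanup
 * path, after a completed descriptor's callback has run, so that operations
 * queued behind it (possibly destined for another channel) get submitted.
 * 'desc' and its ->txd member are hypothetical driver-private names.
 *
 *	// in the driver's completion tasklet/interrupt handler:
 *	if (desc->txd.callback)
 *		desc->txd.callback(desc->txd.callback_param);
 *	dma_run_dependencies(&desc->txd);
 */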
static int __init dma_bus_init(void)
{
	idr_init(&dma_idr);
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);