/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered. When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
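/*
 * Illustrative sketch (not part of this file): a minimal client of the model
 * described above, assuming the dma_event_callback signature declared in
 * dmaengine.h. The names my_chan and my_client_cb are made up for the
 * example; the callback accepts one channel and releases it when told the
 * channel is going away, as required of clients tracking their own channels.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static enum dma_state_client
 *	my_client_cb(struct dma_client *client, struct dma_chan *chan,
 *		     enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			if (my_chan)
 *				return DMA_NAK;
 *			my_chan = chan;
 *			return DMA_ACK;
 *		case DMA_RESOURCE_REMOVED:
 *			if (chan != my_chan)
 *				return DMA_NAK;
 *			my_chan = NULL;
 *			return DMA_ACK;
 *		default:
 *			return DMA_NAK;
 *		}
 *	}
 *
 * The subsystem does the dma_chan_get()/dma_chan_put() on DMA_ACK; the client
 * only records which channel it was granted.
 */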
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
	    atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
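/*
 * Illustrative sketch (not part of this file): building the capability mask
 * that __dma_chan_satisfies_mask() compares against a channel's device.
 * dma_cap_zero()/dma_cap_set() are the dma_cap_mask_t helpers from
 * dmaengine.h; my_client is the made-up client from the sketch above.
 *
 *	dma_cap_zero(my_client.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *
 * A channel matches only if every capability requested here is also set in
 * chan->device->cap_mask.
 */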
/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(
					chan, client);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK) {
					dma_chan_get(chan);
					chan->client_count++;
				} else if (ack == DMA_NAK)
					return;
			}
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* client was holding resources for this channel so
		 * free them
		 */
		if (ack == DMA_ACK) {
			dma_chan_put(chan);
			chan->client_count--;
		}
	}

	mutex_unlock(&dma_list_mutex);
}
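/*
 * Illustrative sketch (not part of this file): a caller that already holds a
 * cookie from one of the dma_async_memcpy_*() helpers below can block on it
 * with dma_sync_wait(); the helper issues pending descriptors and then polls
 * until completion, DMA_ERROR, or the 5 second timeout above.
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		pr_err("offloaded copy did not complete\n");
 */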
/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
		!client->slave);

	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK) {
				dma_chan_put(chan);
				chan->client_count--;
			}
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels to the
 * client that satisfy the capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);
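/*
 * Illustrative sketch (not part of this file): a typical client lifecycle
 * from a module's init/exit paths, reusing the hypothetical my_client and
 * my_client_cb from the sketches above. Registration only adds the client to
 * dma_client_list; dma_async_client_chan_request() is what actually offers
 * matching channels through the event callback.
 *
 *	static int __init my_init(void)
 *	{
 *		dma_cap_zero(my_client.cap_mask);
 *		dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *		my_client.event_callback = my_client_cb;
 *
 *		dma_async_client_register(&my_client);
 *		dma_async_client_chan_request(&my_client);
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		dma_async_client_unregister(&my_client);
 *	}
 */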
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
			 device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);
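/*
 * Illustrative sketch (not part of this file): the driver side of the
 * registration above. A hypothetical driver (struct my_dma, my_* callbacks
 * and pdev are assumptions for the example) fills in the capability mask and
 * the hooks that the BUG_ON() checks require, populates device->channels,
 * and then registers:
 *
 *	dma_cap_set(DMA_MEMCPY, md->dma.cap_mask);
 *	md->dma.device_alloc_chan_resources = my_alloc_chan_resources;
 *	md->dma.device_free_chan_resources = my_free_chan_resources;
 *	md->dma.device_prep_dma_memcpy = my_prep_memcpy;
 *	md->dma.device_is_tx_complete = my_is_tx_complete;
 *	md->dma.device_issue_pending = my_issue_pending;
 *	md->dma.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&md->dma.channels);
 *	... add each struct dma_chan to md->dma.channels ...
 *
 *	err = dma_async_device_register(&md->dma);
 *
 * On device removal the driver calls dma_async_device_unregister(&md->dma),
 * which blocks until every client reference has been dropped.
 */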
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
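/*
 * Illustrative sketch (not part of this file): offloading a copy between two
 * kernel buffers with dma_async_memcpy_buf_to_buf() and falling back to
 * memcpy() when no descriptor is available. chan, dest, src and len are
 * assumed to come from the caller; a negative cookie means the prep failed.
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0) {
 *		memcpy(dest, src, len);
 *	} else {
 *		dma_async_issue_pending(chan);
 *		... later, poll dma_async_is_tx_complete(chan, cookie,
 *		    NULL, NULL) or block with dma_sync_wait(chan, cookie) ...
 *	}
 */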
/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);
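/*
 * Illustrative sketch (not part of this file): a hypothetical driver using
 * dma_async_tx_descriptor_init() when it allocates descriptors in its
 * device_alloc_chan_resources hook. struct my_desc (with an embedded
 * dma_async_tx_descriptor txd) and my_tx_submit are assumptions for the
 * example; the hook returns the number of descriptors allocated, or a
 * negative error so the channel is not offered to the client.
 *
 *	static int my_alloc_chan_resources(struct dma_chan *chan,
 *					   struct dma_client *client)
 *	{
 *		struct my_desc *d = kzalloc(sizeof(*d), GFP_KERNEL);
 *
 *		if (!d)
 *			return -ENOMEM;
 *		dma_async_tx_descriptor_init(&d->txd, chan);
 *		d->txd.tx_submit = my_tx_submit;
 *		... add d to the channel's free list, repeat for more
 *		    descriptors, and return the number allocated ...
 *	}
 */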