/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; the list is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
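
/*
 * Example (illustrative sketch only, not part of the upstream file): a client
 * that wants opportunistic memcpy offload typically pins the subsystem with
 * dmaengine_get(), looks up a channel per operation with dma_find_channel(),
 * and drops its interest with dmaengine_put() when done.  The buffer names
 * below are hypothetical.
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	else
 *		memcpy(dst, src, len);	(fall back to a CPU copy)
 *	...
 *	dmaengine_put();
 *
 * Clients that need a channel all to themselves use dma_request_channel()
 * (see below) instead of dma_find_channel().
 */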

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};
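
/*
 * The attributes above surface per-channel statistics through sysfs.  For
 * example (sketch; "dma0chan0" is whatever channel name your system created):
 *
 *	# cat /sys/class/dma/dma0chan0/memcpy_count
 *	# cat /sys/class/dma/dma0chan0/bytes_transferred
 *	# cat /sys/class/dma/dma0chan0/in_use
 *
 * in_use reports chan->client_count, i.e. how many references are currently
 * held against the channel.
 */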

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
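
/*
 * Example (sketch, not part of the upstream file): dma_sync_wait() is the
 * simplest way for a client to block until a previously submitted transaction
 * completes.  The names "chan", "dst", "src" and "len" are placeholders.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0 || dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		...fall back to memcpy() or report an error...
 *
 * Note that dma_sync_wait() spins, so it is only appropriate for setup and
 * error paths, not for hot paths that should use completion callbacks.
 */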

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
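
/*
 * Example (sketch): a client such as net_dma batches several copies and then
 * kicks the hardware once, rather than issuing after every submission.  The
 * loop below is illustrative pseudo-code only.
 *
 *	for each fragment:
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (!chan)
 *			break;	(copy the fragment with the CPU instead)
 *		dma_async_memcpy_pg_to_pg(chan, dst_pg, dst_off,
 *					  src_pg, src_off, frag_len);
 *
 *	dma_issue_pending_all();	(flush everything queued above)
 */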

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get the specified channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0)
		err = dma_chan_get(chan);
	else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	if (err)
		pr_debug("%s: failed to get %s: (%d)\n",
			__func__, dma_chan_name(chan), err);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
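
/*
 * Example (sketch): exclusive allocation via the dma_request_channel() macro
 * (which wraps __dma_request_channel()).  The filter callback and its
 * parameter are hypothetical and driver specific.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	(pick my controller)
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_controller_dev);
 *	if (chan) {
 *		...use the channel exclusively...
 *		dma_release_channel(chan);
 *	}
 */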

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
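
/*
 * Example (sketch): the provider side.  A DMA controller driver fills in a
 * struct dma_device, adds its channels to the device's channels list, and
 * registers.  "my_dev" and the my_* callbacks are hypothetical driver code.
 *
 *	dma_cap_zero(my_dev->dma.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_dev->dma.cap_mask);
 *	my_dev->dma.dev = &pdev->dev;
 *	my_dev->dma.device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dev->dma.device_free_chan_resources = my_free_chan_resources;
 *	my_dev->dma.device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dev->dma.device_tx_status = my_tx_status;
 *	my_dev->dma.device_issue_pending = my_issue_pending;
 *	INIT_LIST_HEAD(&my_dev->dma.channels);
 *	(for each hardware channel: chan->device = &my_dev->dma;
 *	 list_add_tail(&chan->device_node, &my_dev->dma.channels);)
 *
 *	rc = dma_async_device_register(&my_dev->dma);
 *
 * The matching dma_async_device_unregister() call belongs in the driver's
 * remove path, once all clients have released their channels.
 */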

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
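
/*
 * Example (sketch): dma_wait_for_async_tx() is typically used with a
 * descriptor handed back by the async_tx helpers (crypto/async_tx), which may
 * return before the descriptor has been submitted.  The helper call below is
 * illustrative only; see include/linux/async_tx.h for the actual submit API.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = async_memcpy(dest_page, src_page, 0, 0, len, &submit);
 *	if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
 *		...handle the error or timeout...
 *
 * A NULL descriptor (synchronous fallback) is handled above and reported as
 * DMA_SUCCESS.
 */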

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);