xref: /openbmc/linux/drivers/dma/dmaengine.c (revision 7d7ae873b5e0f46d19e5dc818d1a7809e4b7cc81)
19ab65affSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2c13c8260SChris Leech /*
3c13c8260SChris Leech  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4c13c8260SChris Leech  */
5c13c8260SChris Leech 
6c13c8260SChris Leech /*
7c13c8260SChris Leech  * This code implements the DMA subsystem. It provides a HW-neutral interface
8c13c8260SChris Leech  * for other kernel code to use asynchronous memory copy capabilities,
9c13c8260SChris Leech  * if present, and allows different HW DMA drivers to register as providing
10c13c8260SChris Leech  * this capability.
11c13c8260SChris Leech  *
12c13c8260SChris Leech  * Due to the fact we are accelerating what is already a relatively fast
13c13c8260SChris Leech  * operation, the code goes to great lengths to avoid additional overhead,
14c13c8260SChris Leech  * such as locking.
15c13c8260SChris Leech  *
16c13c8260SChris Leech  * LOCKING:
17c13c8260SChris Leech  *
18aa1e6f1aSDan Williams  * The subsystem keeps a global list of dma_device structs; it is protected
19aa1e6f1aSDan Williams  * by a mutex, dma_list_mutex.
20c13c8260SChris Leech  *
21f27c580cSDan Williams  * A subsystem can get access to a channel by calling dmaengine_get() followed
22f27c580cSDan Williams  * by dma_find_channel(), or if it has need for an exclusive channel it can call
23f27c580cSDan Williams  * dma_request_channel().  Once a channel is allocated a reference is taken
24f27c580cSDan Williams  * against its corresponding driver to disable removal.
25f27c580cSDan Williams  *
26c13c8260SChris Leech  * Each device has a channels list, which runs unlocked but is never modified
27c13c8260SChris Leech  * once the device is registered; it is just set up by the driver.
28c13c8260SChris Leech  *
2944348e8aSMauro Carvalho Chehab  * See Documentation/driver-api/dmaengine for more details
30c13c8260SChris Leech  */
31c13c8260SChris Leech 
3263433250SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3363433250SJoe Perches 
34a8135d0dSPeter Ujfalusi #include <linux/platform_device.h>
35b7f080cfSAlexey Dobriyan #include <linux/dma-mapping.h>
36c13c8260SChris Leech #include <linux/init.h>
37c13c8260SChris Leech #include <linux/module.h>
387405f74bSDan Williams #include <linux/mm.h>
39c13c8260SChris Leech #include <linux/device.h>
40c13c8260SChris Leech #include <linux/dmaengine.h>
41c13c8260SChris Leech #include <linux/hardirq.h>
42c13c8260SChris Leech #include <linux/spinlock.h>
43c13c8260SChris Leech #include <linux/percpu.h>
44c13c8260SChris Leech #include <linux/rcupdate.h>
45c13c8260SChris Leech #include <linux/mutex.h>
467405f74bSDan Williams #include <linux/jiffies.h>
472ba05622SDan Williams #include <linux/rculist.h>
48864498aaSDan Williams #include <linux/idr.h>
495a0e3ad6STejun Heo #include <linux/slab.h>
504e82f5ddSAndy Shevchenko #include <linux/acpi.h>
514e82f5ddSAndy Shevchenko #include <linux/acpi_dma.h>
529a6cecc8SJon Hunter #include <linux/of_dma.h>
5345c463aeSDan Williams #include <linux/mempool.h>
5498fa15f3SAnshuman Khandual #include <linux/numa.h>
55c13c8260SChris Leech 
56833d88f3SAndy Shevchenko #include "dmaengine.h"
57833d88f3SAndy Shevchenko 
58c13c8260SChris Leech static DEFINE_MUTEX(dma_list_mutex);
59adc064cdSMatthew Wilcox static DEFINE_IDA(dma_ida);
60c13c8260SChris Leech static LIST_HEAD(dma_device_list);
616f49a57aSDan Williams static long dmaengine_ref_count;
62c13c8260SChris Leech 
63e937cc1dSPeter Ujfalusi /* --- debugfs implementation --- */
64e937cc1dSPeter Ujfalusi #ifdef CONFIG_DEBUG_FS
65e937cc1dSPeter Ujfalusi #include <linux/debugfs.h>
66e937cc1dSPeter Ujfalusi 
6726cf132dSPeter Ujfalusi static struct dentry *rootdir;
6826cf132dSPeter Ujfalusi 
dmaengine_debug_register(struct dma_device * dma_dev)6926cf132dSPeter Ujfalusi static void dmaengine_debug_register(struct dma_device *dma_dev)
7026cf132dSPeter Ujfalusi {
7126cf132dSPeter Ujfalusi 	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
7226cf132dSPeter Ujfalusi 						   rootdir);
7326cf132dSPeter Ujfalusi 	if (IS_ERR(dma_dev->dbg_dev_root))
7426cf132dSPeter Ujfalusi 		dma_dev->dbg_dev_root = NULL;
7526cf132dSPeter Ujfalusi }
7626cf132dSPeter Ujfalusi 
dmaengine_debug_unregister(struct dma_device * dma_dev)7726cf132dSPeter Ujfalusi static void dmaengine_debug_unregister(struct dma_device *dma_dev)
7826cf132dSPeter Ujfalusi {
7926cf132dSPeter Ujfalusi 	debugfs_remove_recursive(dma_dev->dbg_dev_root);
8026cf132dSPeter Ujfalusi 	dma_dev->dbg_dev_root = NULL;
8126cf132dSPeter Ujfalusi }
8226cf132dSPeter Ujfalusi 
dmaengine_dbg_summary_show(struct seq_file * s,struct dma_device * dma_dev)83e937cc1dSPeter Ujfalusi static void dmaengine_dbg_summary_show(struct seq_file *s,
84e937cc1dSPeter Ujfalusi 				       struct dma_device *dma_dev)
85e937cc1dSPeter Ujfalusi {
86e937cc1dSPeter Ujfalusi 	struct dma_chan *chan;
87e937cc1dSPeter Ujfalusi 
88e937cc1dSPeter Ujfalusi 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
89e937cc1dSPeter Ujfalusi 		if (chan->client_count) {
90e937cc1dSPeter Ujfalusi 			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
91e937cc1dSPeter Ujfalusi 				   chan->dbg_client_name ?: "in-use");
92e937cc1dSPeter Ujfalusi 
93e937cc1dSPeter Ujfalusi 			if (chan->router)
94e937cc1dSPeter Ujfalusi 				seq_printf(s, " (via router: %s)\n",
95e937cc1dSPeter Ujfalusi 					dev_name(chan->router->dev));
96e937cc1dSPeter Ujfalusi 			else
97e937cc1dSPeter Ujfalusi 				seq_puts(s, "\n");
98e937cc1dSPeter Ujfalusi 		}
99e937cc1dSPeter Ujfalusi 	}
100e937cc1dSPeter Ujfalusi }
101e937cc1dSPeter Ujfalusi 
dmaengine_summary_show(struct seq_file * s,void * data)102e937cc1dSPeter Ujfalusi static int dmaengine_summary_show(struct seq_file *s, void *data)
103e937cc1dSPeter Ujfalusi {
104e937cc1dSPeter Ujfalusi 	struct dma_device *dma_dev = NULL;
105e937cc1dSPeter Ujfalusi 
106e937cc1dSPeter Ujfalusi 	mutex_lock(&dma_list_mutex);
107e937cc1dSPeter Ujfalusi 	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
108e937cc1dSPeter Ujfalusi 		seq_printf(s, "dma%d (%s): number of channels: %u\n",
109e937cc1dSPeter Ujfalusi 			   dma_dev->dev_id, dev_name(dma_dev->dev),
110e937cc1dSPeter Ujfalusi 			   dma_dev->chancnt);
111e937cc1dSPeter Ujfalusi 
112e937cc1dSPeter Ujfalusi 		if (dma_dev->dbg_summary_show)
113e937cc1dSPeter Ujfalusi 			dma_dev->dbg_summary_show(s, dma_dev);
114e937cc1dSPeter Ujfalusi 		else
115e937cc1dSPeter Ujfalusi 			dmaengine_dbg_summary_show(s, dma_dev);
116e937cc1dSPeter Ujfalusi 
117e937cc1dSPeter Ujfalusi 		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
118e937cc1dSPeter Ujfalusi 			seq_puts(s, "\n");
119e937cc1dSPeter Ujfalusi 	}
120e937cc1dSPeter Ujfalusi 	mutex_unlock(&dma_list_mutex);
121e937cc1dSPeter Ujfalusi 
122e937cc1dSPeter Ujfalusi 	return 0;
123e937cc1dSPeter Ujfalusi }
124e937cc1dSPeter Ujfalusi DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
125e937cc1dSPeter Ujfalusi 
/* Create the debugfs root directory and the global "summary" file. */
static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
134e937cc1dSPeter Ujfalusi #else
/* !CONFIG_DEBUG_FS stubs - keep callers free of #ifdefs. */
static inline void dmaengine_debugfs_init(void) { }
/*
 * Return void to match the CONFIG_DEBUG_FS variant above; the real
 * implementation returns nothing, so no caller can use a return value.
 */
static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
142e937cc1dSPeter Ujfalusi #endif	/* DEBUG_FS */
143e937cc1dSPeter Ujfalusi 
144c13c8260SChris Leech /* --- sysfs implementation --- */
145c13c8260SChris Leech 
14671723a96SGeert Uytterhoeven #define DMA_SLAVE_NAME	"slave"
14771723a96SGeert Uytterhoeven 
14841d5e59cSDan Williams /**
149fe333389SGeert Uytterhoeven  * dev_to_dma_chan - convert a device pointer to its sysfs container object
1509872e23dSAndy Shevchenko  * @dev:	device node
15141d5e59cSDan Williams  *
1529872e23dSAndy Shevchenko  * Must be called under dma_list_mutex.
15341d5e59cSDan Williams  */
dev_to_dma_chan(struct device * dev)15441d5e59cSDan Williams static struct dma_chan *dev_to_dma_chan(struct device *dev)
15541d5e59cSDan Williams {
15641d5e59cSDan Williams 	struct dma_chan_dev *chan_dev;
15741d5e59cSDan Williams 
15841d5e59cSDan Williams 	chan_dev = container_of(dev, typeof(*chan_dev), device);
15941d5e59cSDan Williams 	return chan_dev->chan;
16041d5e59cSDan Williams }
16141d5e59cSDan Williams 
memcpy_count_show(struct device * dev,struct device_attribute * attr,char * buf)16258b267d3SGreg Kroah-Hartman static ssize_t memcpy_count_show(struct device *dev,
16358b267d3SGreg Kroah-Hartman 				 struct device_attribute *attr, char *buf)
164c13c8260SChris Leech {
16541d5e59cSDan Williams 	struct dma_chan *chan;
166c13c8260SChris Leech 	unsigned long count = 0;
167c13c8260SChris Leech 	int i;
16841d5e59cSDan Williams 	int err;
169c13c8260SChris Leech 
17041d5e59cSDan Williams 	mutex_lock(&dma_list_mutex);
17141d5e59cSDan Williams 	chan = dev_to_dma_chan(dev);
17241d5e59cSDan Williams 	if (chan) {
17317f3ae08SAndrew Morton 		for_each_possible_cpu(i)
174c13c8260SChris Leech 			count += per_cpu_ptr(chan->local, i)->memcpy_count;
17540e171c2SAndy Shevchenko 		err = sysfs_emit(buf, "%lu\n", count);
17641d5e59cSDan Williams 	} else
17741d5e59cSDan Williams 		err = -ENODEV;
17841d5e59cSDan Williams 	mutex_unlock(&dma_list_mutex);
179c13c8260SChris Leech 
18041d5e59cSDan Williams 	return err;
181c13c8260SChris Leech }
18258b267d3SGreg Kroah-Hartman static DEVICE_ATTR_RO(memcpy_count);
183c13c8260SChris Leech 
bytes_transferred_show(struct device * dev,struct device_attribute * attr,char * buf)18458b267d3SGreg Kroah-Hartman static ssize_t bytes_transferred_show(struct device *dev,
18558b267d3SGreg Kroah-Hartman 				      struct device_attribute *attr, char *buf)
186c13c8260SChris Leech {
18741d5e59cSDan Williams 	struct dma_chan *chan;
188c13c8260SChris Leech 	unsigned long count = 0;
189c13c8260SChris Leech 	int i;
19041d5e59cSDan Williams 	int err;
191c13c8260SChris Leech 
19241d5e59cSDan Williams 	mutex_lock(&dma_list_mutex);
19341d5e59cSDan Williams 	chan = dev_to_dma_chan(dev);
19441d5e59cSDan Williams 	if (chan) {
19517f3ae08SAndrew Morton 		for_each_possible_cpu(i)
196c13c8260SChris Leech 			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
19740e171c2SAndy Shevchenko 		err = sysfs_emit(buf, "%lu\n", count);
19841d5e59cSDan Williams 	} else
19941d5e59cSDan Williams 		err = -ENODEV;
20041d5e59cSDan Williams 	mutex_unlock(&dma_list_mutex);
201c13c8260SChris Leech 
20241d5e59cSDan Williams 	return err;
203c13c8260SChris Leech }
20458b267d3SGreg Kroah-Hartman static DEVICE_ATTR_RO(bytes_transferred);
205c13c8260SChris Leech 
in_use_show(struct device * dev,struct device_attribute * attr,char * buf)20658b267d3SGreg Kroah-Hartman static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
20758b267d3SGreg Kroah-Hartman 			   char *buf)
208c13c8260SChris Leech {
20941d5e59cSDan Williams 	struct dma_chan *chan;
21041d5e59cSDan Williams 	int err;
211c13c8260SChris Leech 
21241d5e59cSDan Williams 	mutex_lock(&dma_list_mutex);
21341d5e59cSDan Williams 	chan = dev_to_dma_chan(dev);
21441d5e59cSDan Williams 	if (chan)
21540e171c2SAndy Shevchenko 		err = sysfs_emit(buf, "%d\n", chan->client_count);
21641d5e59cSDan Williams 	else
21741d5e59cSDan Williams 		err = -ENODEV;
21841d5e59cSDan Williams 	mutex_unlock(&dma_list_mutex);
21941d5e59cSDan Williams 
22041d5e59cSDan Williams 	return err;
221c13c8260SChris Leech }
22258b267d3SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_use);
223c13c8260SChris Leech 
/* sysfs attributes exposed on every channel device of the "dma" class */
static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
231c13c8260SChris Leech 
chan_dev_release(struct device * dev)23241d5e59cSDan Williams static void chan_dev_release(struct device *dev)
23341d5e59cSDan Williams {
23441d5e59cSDan Williams 	struct dma_chan_dev *chan_dev;
23541d5e59cSDan Williams 
23641d5e59cSDan Williams 	chan_dev = container_of(dev, typeof(*chan_dev), device);
23741d5e59cSDan Williams 	kfree(chan_dev);
23841d5e59cSDan Williams }
23941d5e59cSDan Williams 
/* "dma" device class; each registered channel becomes a child device */
static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
245c13c8260SChris Leech 
246c13c8260SChris Leech /* --- client and device registration --- */
247c13c8260SChris Leech 
/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan:	associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
26111a0fd2bSLogan Gunthorpe 
/* Allocate a percpu channel_table[] entry for every operation capability. */
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		/* free_percpu(NULL) is a no-op, so the full sweep is safe */
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
29411a0fd2bSLogan Gunthorpe 
29511a0fd2bSLogan Gunthorpe /**
2969872e23dSAndy Shevchenko  * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
2979872e23dSAndy Shevchenko  * @chan:	DMA channel to test
2989872e23dSAndy Shevchenko  * @cpu:	CPU index which the channel should be close to
2999872e23dSAndy Shevchenko  *
3009872e23dSAndy Shevchenko  * Returns true if the channel is in the same NUMA-node as the CPU.
30111a0fd2bSLogan Gunthorpe  */
dma_chan_is_local(struct dma_chan * chan,int cpu)30211a0fd2bSLogan Gunthorpe static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
30311a0fd2bSLogan Gunthorpe {
30411a0fd2bSLogan Gunthorpe 	int node = dev_to_node(chan->device->dev);
30511a0fd2bSLogan Gunthorpe 	return node == NUMA_NO_NODE ||
30611a0fd2bSLogan Gunthorpe 		cpumask_test_cpu(cpu, cpumask_of_node(node));
30711a0fd2bSLogan Gunthorpe }
30811a0fd2bSLogan Gunthorpe 
30911a0fd2bSLogan Gunthorpe /**
3109872e23dSAndy Shevchenko  * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
31111a0fd2bSLogan Gunthorpe  * @cap:	capability to match
3129872e23dSAndy Shevchenko  * @cpu:	CPU index which the channel should be close to
31311a0fd2bSLogan Gunthorpe  *
3149872e23dSAndy Shevchenko  * If some channels are close to the given CPU, the one with the lowest
3159872e23dSAndy Shevchenko  * reference count is returned. Otherwise, CPU is ignored and only the
31611a0fd2bSLogan Gunthorpe  * reference count is taken into account.
3179872e23dSAndy Shevchenko  *
31811a0fd2bSLogan Gunthorpe  * Must be called under dma_list_mutex.
31911a0fd2bSLogan Gunthorpe  */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		/* skip devices lacking the capability and private devices */
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			/* only channels with active clients are candidates */
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			/* track the least-loaded NUMA-local channel separately */
			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	/* prefer a NUMA-local channel over the global minimum */
	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
35111a0fd2bSLogan Gunthorpe 
35211a0fd2bSLogan Gunthorpe /**
35311a0fd2bSLogan Gunthorpe  * dma_channel_rebalance - redistribute the available channels
35411a0fd2bSLogan Gunthorpe  *
3559872e23dSAndy Shevchenko  * Optimize for CPU isolation (each CPU gets a dedicated channel for an
35611a0fd2bSLogan Gunthorpe  * operation type) in the SMP case, and operation isolation (avoid
3579872e23dSAndy Shevchenko  * multi-tasking channels) in the non-SMP case.
3589872e23dSAndy Shevchenko  *
3599872e23dSAndy Shevchenko  * Must be called under dma_list_mutex.
36011a0fd2bSLogan Gunthorpe  */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	/* reset load counters on every public (non-private) channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels; entries may be NULL if no
	 * channel provides a given capability
	 */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
39111a0fd2bSLogan Gunthorpe 
dma_device_satisfies_mask(struct dma_device * device,const dma_cap_mask_t * want)39269b1189bSGeert Uytterhoeven static int dma_device_satisfies_mask(struct dma_device *device,
393a53e28daSLars-Peter Clausen 				     const dma_cap_mask_t *want)
394d379b01eSDan Williams {
395d379b01eSDan Williams 	dma_cap_mask_t has;
396d379b01eSDan Williams 
39759b5ec21SDan Williams 	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
398d379b01eSDan Williams 		DMA_TX_TYPE_END);
399d379b01eSDan Williams 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
400d379b01eSDan Williams }
401d379b01eSDan Williams 
/* Module that provides the channel's DMA device (for reference counting). */
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}
4066f49a57aSDan Williams 
4076f49a57aSDan Williams /**
4086f49a57aSDan Williams  * balance_ref_count - catch up the channel reference count
4099872e23dSAndy Shevchenko  * @chan:	channel to balance ->client_count versus dmaengine_ref_count
4106f49a57aSDan Williams  *
4119872e23dSAndy Shevchenko  * Must be called under dma_list_mutex.
4126f49a57aSDan Williams  */
balance_ref_count(struct dma_chan * chan)4136f49a57aSDan Williams static void balance_ref_count(struct dma_chan *chan)
4146f49a57aSDan Williams {
4156f49a57aSDan Williams 	struct module *owner = dma_chan_to_owner(chan);
4166f49a57aSDan Williams 
4176f49a57aSDan Williams 	while (chan->client_count < dmaengine_ref_count) {
4186f49a57aSDan Williams 		__module_get(owner);
4196f49a57aSDan Williams 		chan->client_count++;
4206f49a57aSDan Williams 	}
4216f49a57aSDan Williams }
4226f49a57aSDan Williams 
/*
 * Final kref release for a dma_device: unlink it from the global list,
 * rebalance the channel table, then hand off to the driver's optional
 * release hook. Reached via dma_device_put(), i.e. under dma_list_mutex.
 */
static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}
4338ad342a8SLogan Gunthorpe 
/* Drop a device reference; the last put triggers dma_device_release(). */
static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}
4398ad342a8SLogan Gunthorpe 
4406f49a57aSDan Williams /**
4419872e23dSAndy Shevchenko  * dma_chan_get - try to grab a DMA channel's parent driver module
4429872e23dSAndy Shevchenko  * @chan:	channel to grab
4436f49a57aSDan Williams  *
4449872e23dSAndy Shevchenko  * Must be called under dma_list_mutex.
4456f49a57aSDan Williams  */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		chan->client_count++;
		return 0;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* the device may already be on its way out; don't resurrect it */
	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	chan->client_count++;

	/* public channels track every dmaengine client, not just this one */
	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}
4876f49a57aSDan Williams 
4886f49a57aSDan Williams /**
4899872e23dSAndy Shevchenko  * dma_chan_put - drop a reference to a DMA channel's parent driver module
4909872e23dSAndy Shevchenko  * @chan:	channel to release
4916f49a57aSDan Williams  *
4929872e23dSAndy Shevchenko  * Must be called under dma_list_mutex.
4936f49a57aSDan Williams  */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	/* release the device/module references taken in dma_chan_get() */
	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}
5196f49a57aSDan Williams 
/*
 * Busy-poll (up to 5 s) until @cookie leaves DMA_IN_PROGRESS on @chan,
 * issuing pending descriptors first. The timeout is deliberately checked
 * before the status, matching the original loop ordering.
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
	enum dma_status status;

	dma_async_issue_pending(chan);

	for (;;) {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			return status;
		cpu_relax();
	}
}
EXPORT_SYMBOL(dma_sync_wait);
5407405f74bSDan Williams 
541c13c8260SChris Leech /**
542bec08513SDan Williams  * dma_find_channel - find a channel to carry out the operation
543bec08513SDan Williams  * @tx_type:	transaction type
544bec08513SDan Williams  */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	/* percpu lookup; may be NULL if no public channel provides tx_type */
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
550bec08513SDan Williams 
551bec08513SDan Williams /**
5522ba05622SDan Williams  * dma_issue_pending_all - flush all pending operations across all channels
5532ba05622SDan Williams  */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	/* RCU protects the device list; per-device channel lists are never
	 * modified after registration, so they can be walked unlocked
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		/* private devices are not part of the general-purpose pool */
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
5702ba05622SDan Williams 
/**
 * dma_get_slave_caps - query the slave capabilities of a channel
 * @chan:	channel to query
 * @caps:	output structure, filled in on success
 *
 * Returns 0 on success, -EINVAL on bad arguments, or -ENXIO when the
 * channel does not support slave/cyclic transfers or does not report
 * generic capabilities.
 */
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->min_burst = device->min_burst;
	caps->max_burst = device->max_burst;
	caps->max_sg_burst = device->max_sg_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	/* pause/resume/terminate support is implied by the ops presence */
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	/*
	 * DMA engine device might be configured with non-uniformly
	 * distributed slave capabilities per device channels. In this
	 * case the corresponding driver may provide the device_caps
	 * callback to override the generic capabilities with
	 * channel-specific ones.
	 */
	if (device->device_caps)
		device->device_caps(chan, caps);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
6180d5484b1SLaurent Pinchart 
/*
 * private_candidate - find a channel of @dev usable for exclusive allocation
 * @mask: required capabilities, or NULL to accept any device
 * @dev: device whose channels are scanned
 * @fn: optional filter callback; a channel is only returned if it says true
 * @fn_param: opaque argument passed to @fn
 *
 * Returns the first idle channel that satisfies @mask and @fn, or NULL.
 * Called with dma_list_mutex held.
 */
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
65559b5ec21SDan Williams 
/*
 * find_candidate - locate and take a reference on a matching channel
 * @device: device whose channels are considered
 * @mask: required capabilities
 * @fn: optional filter callback
 * @fn_param: opaque argument passed to @fn
 *
 * Returns the grabbed channel, ERR_PTR(-EPROBE_DEFER) when no channel
 * matched, or an ERR_PTR for the dma_chan_get() failure.
 * Called with dma_list_mutex held.
 */
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				/* backing module is gone; drop the device
				 * from the global list entirely
				 */
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);

			/* undo the DMA_PRIVATE promotion done above */
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
6927bd903c5SPeter Ujfalusi 
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan:	target channel
 *
 * Returns @chan on success, or NULL when the channel is already in use
 * or its backing module could not be pinned.
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;
		int err;

		/* hide the device from the general-purpose allocator while
		 * this channel is held exclusively
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			/* undo the DMA_PRIVATE promotion on failure */
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);


	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
7267bb587f4SZhangfei Gao 
dma_get_any_slave_channel(struct dma_device * device)7278010dad5SStephen Warren struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
7288010dad5SStephen Warren {
7298010dad5SStephen Warren 	dma_cap_mask_t mask;
7308010dad5SStephen Warren 	struct dma_chan *chan;
7318010dad5SStephen Warren 
7328010dad5SStephen Warren 	dma_cap_zero(mask);
7338010dad5SStephen Warren 	dma_cap_set(DMA_SLAVE, mask);
7348010dad5SStephen Warren 
7358010dad5SStephen Warren 	/* lock against __dma_request_channel */
7368010dad5SStephen Warren 	mutex_lock(&dma_list_mutex);
7378010dad5SStephen Warren 
7387bd903c5SPeter Ujfalusi 	chan = find_candidate(device, &mask, NULL, NULL);
7398010dad5SStephen Warren 
7408010dad5SStephen Warren 	mutex_unlock(&dma_list_mutex);
7418010dad5SStephen Warren 
7427bd903c5SPeter Ujfalusi 	return IS_ERR(chan) ? NULL : chan;
7438010dad5SStephen Warren }
7448010dad5SStephen Warren EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
7458010dad5SStephen Warren 
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask:	capabilities that the channel must satisfy
 * @fn:		optional callback to disposition available channels
 * @fn_param:	opaque parameter to pass to dma_filter_fn()
 * @np:		device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		/* keep scanning: a failure on one device is not fatal */
		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
78559b5ec21SDan Williams 
dma_filter_match(struct dma_device * device,const char * name,struct device * dev)786a8135d0dSPeter Ujfalusi static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
787a8135d0dSPeter Ujfalusi 						    const char *name,
788a8135d0dSPeter Ujfalusi 						    struct device *dev)
789a8135d0dSPeter Ujfalusi {
790a8135d0dSPeter Ujfalusi 	int i;
791a8135d0dSPeter Ujfalusi 
792a8135d0dSPeter Ujfalusi 	if (!device->filter.mapcnt)
793a8135d0dSPeter Ujfalusi 		return NULL;
794a8135d0dSPeter Ujfalusi 
795a8135d0dSPeter Ujfalusi 	for (i = 0; i < device->filter.mapcnt; i++) {
796a8135d0dSPeter Ujfalusi 		const struct dma_slave_map *map = &device->filter.map[i];
797a8135d0dSPeter Ujfalusi 
798a8135d0dSPeter Ujfalusi 		if (!strcmp(map->devname, dev_name(dev)) &&
799a8135d0dSPeter Ujfalusi 		    !strcmp(map->slave, name))
800a8135d0dSPeter Ujfalusi 			return map;
801a8135d0dSPeter Ujfalusi 	}
802a8135d0dSPeter Ujfalusi 
803a8135d0dSPeter Ujfalusi 	return NULL;
804a8135d0dSPeter Ujfalusi }
805a8135d0dSPeter Ujfalusi 
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Looks for the channel via device-tree, then ACPI, then the DMA filter
 * maps registered by drivers.
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	/* deferral from the firmware lookup must be propagated as-is */
	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		/* no provider matched yet; one may still probe later */
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	/* best effort; a NULL dbg_client_name is tolerated by debugfs */
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		/* the channel is fully usable even without a name/symlinks */
		return chan;
	chan->slave = dev;

	/* sysfs links are best effort; failure only costs discoverability */
	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
8750ad7c000SStephen Warren 
8760ad7c000SStephen Warren /**
877a8135d0dSPeter Ujfalusi  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
878a8135d0dSPeter Ujfalusi  * @mask:	capabilities that the channel must satisfy
879a8135d0dSPeter Ujfalusi  *
880a8135d0dSPeter Ujfalusi  * Returns pointer to appropriate DMA channel on success or an error pointer.
881a8135d0dSPeter Ujfalusi  */
dma_request_chan_by_mask(const dma_cap_mask_t * mask)882a8135d0dSPeter Ujfalusi struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
883a8135d0dSPeter Ujfalusi {
884a8135d0dSPeter Ujfalusi 	struct dma_chan *chan;
885a8135d0dSPeter Ujfalusi 
886a8135d0dSPeter Ujfalusi 	if (!mask)
887a8135d0dSPeter Ujfalusi 		return ERR_PTR(-ENODEV);
888a8135d0dSPeter Ujfalusi 
889f5151311SBaolin Wang 	chan = __dma_request_channel(mask, NULL, NULL, NULL);
890ec8ca8e3SPeter Ujfalusi 	if (!chan) {
891ec8ca8e3SPeter Ujfalusi 		mutex_lock(&dma_list_mutex);
892ec8ca8e3SPeter Ujfalusi 		if (list_empty(&dma_device_list))
893ec8ca8e3SPeter Ujfalusi 			chan = ERR_PTR(-EPROBE_DEFER);
894ec8ca8e3SPeter Ujfalusi 		else
895a8135d0dSPeter Ujfalusi 			chan = ERR_PTR(-ENODEV);
896ec8ca8e3SPeter Ujfalusi 		mutex_unlock(&dma_list_mutex);
897ec8ca8e3SPeter Ujfalusi 	}
898a8135d0dSPeter Ujfalusi 
899a8135d0dSPeter Ujfalusi 	return chan;
900a8135d0dSPeter Ujfalusi }
901a8135d0dSPeter Ujfalusi EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
902a8135d0dSPeter Ujfalusi 
/**
 * dma_release_channel - release a previously requested channel
 * @chan:	channel to release
 *
 * Drops the single client reference taken at request time and tears down
 * the naming/sysfs state set up by dma_request_chan().
 */
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		/* remove the symlinks created by dma_request_chan() */
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
92859b5ec21SDan Williams 
/**
 * dmaengine_get - register interest in dma_channels
 *
 * Takes a reference on every public channel so their backing modules
 * cannot be unloaded while clients may be using them.
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* private devices are not part of the shared pool */
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
967c13c8260SChris Leech 
/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 *
 * Drops the per-channel references taken by dmaengine_get().
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	/* an unbalanced put is a client bug */
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* private devices never had references taken on them */
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
989c13c8260SChris Leech 
device_has_all_tx_types(struct dma_device * device)990138f4c35SDan Williams static bool device_has_all_tx_types(struct dma_device *device)
991138f4c35SDan Williams {
992138f4c35SDan Williams 	/* A device that satisfies this test has channels that will never cause
993138f4c35SDan Williams 	 * an async_tx channel switch event as all possible operation types can
994138f4c35SDan Williams 	 * be handled.
995138f4c35SDan Williams 	 */
996138f4c35SDan Williams 	#ifdef CONFIG_ASYNC_TX_DMA
997138f4c35SDan Williams 	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
998138f4c35SDan Williams 		return false;
999138f4c35SDan Williams 	#endif
1000138f4c35SDan Williams 
1001d57d3a48SJavier Martinez Canillas 	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
1002138f4c35SDan Williams 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1003138f4c35SDan Williams 		return false;
1004138f4c35SDan Williams 	#endif
1005138f4c35SDan Williams 
1006d57d3a48SJavier Martinez Canillas 	#if IS_ENABLED(CONFIG_ASYNC_XOR)
1007138f4c35SDan Williams 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
1008138f4c35SDan Williams 		return false;
10097b3cc2b1SDan Williams 
10107b3cc2b1SDan Williams 	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
10114499a24dSDan Williams 	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
10124499a24dSDan Williams 		return false;
1013138f4c35SDan Williams 	#endif
10147b3cc2b1SDan Williams 	#endif
1015138f4c35SDan Williams 
1016d57d3a48SJavier Martinez Canillas 	#if IS_ENABLED(CONFIG_ASYNC_PQ)
1017138f4c35SDan Williams 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
1018138f4c35SDan Williams 		return false;
10197b3cc2b1SDan Williams 
10207b3cc2b1SDan Williams 	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
10214499a24dSDan Williams 	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
10224499a24dSDan Williams 		return false;
1023138f4c35SDan Williams 	#endif
10247b3cc2b1SDan Williams 	#endif
1025138f4c35SDan Williams 
1026138f4c35SDan Williams 	return true;
1027138f4c35SDan Williams }
1028138f4c35SDan Williams 
get_dma_id(struct dma_device * device)1029257b17caSDan Williams static int get_dma_id(struct dma_device *device)
1030257b17caSDan Williams {
1031485258b4SMatthew Wilcox 	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
1032257b17caSDan Williams 
1033485258b4SMatthew Wilcox 	if (rc < 0)
1034adc064cdSMatthew Wilcox 		return rc;
1035485258b4SMatthew Wilcox 	device->dev_id = rc;
1036485258b4SMatthew Wilcox 	return 0;
1037257b17caSDan Williams }
1038257b17caSDan Williams 
/*
 * Set up the per-channel state (per-cpu refcounts, chan_id, sysfs device)
 * for @chan of @device. Returns 0 on success or a negative errno; on
 * failure all partially-created state is unwound and chan->local is left
 * NULL so the unregister path can detect an unregistered channel.
 */
static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc;

	/* per-cpu client reference counts for dma_chan_get()/put() */
	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		rc = -ENOMEM;
		goto err_free_local;
	}

	/*
	 * Allocate the channel id from the per-device ida; it is released
	 * on the error paths below and again at unregistration time.
	 */
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free_dev;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

 err_out_ida:
	ida_free(&device->chan_ida, chan->chan_id);
 err_free_dev:
	kfree(chan->dev);
 err_free_local:
	free_percpu(chan->local);
	/* NULL marks the channel as never registered */
	chan->local = NULL;
	return rc;
}
1088d2fb0a04SDave Jiang 
/**
 * dma_async_device_channel_register - add a channel to a registered device
 * @device:	dmaengine device the channel belongs to
 * @chan:	channel to register
 *
 * Registers the channel and rebalances the public channel table.
 * Returns 0 on success or a negative errno.
 */
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int ret;

	ret = __dma_async_device_channel_register(device, chan);
	if (ret)
		return ret;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1102e81274cdSDave Jiang 
/*
 * Tear down the per-channel state created by
 * __dma_async_device_channel_register(). Safe to call on a channel whose
 * registration never completed.
 */
static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	/* NULL ->local means the channel was never (fully) registered */
	if (chan->local == NULL)
		return;

	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	device->chancnt--;
	/* detach the sysfs device from the channel before unregistering */
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}
1120d2fb0a04SDave Jiang 
/**
 * dma_async_device_channel_unregister - remove a channel from a device
 * @device:	dmaengine device the channel belongs to
 * @chan:	channel to remove
 *
 * Tears down the channel and rebalances the remaining public channels.
 */
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1128e81274cdSDave Jiang 
1129c13c8260SChris Leech /**
11306508871eSRandy Dunlap  * dma_async_device_register - registers DMA devices found
11319872e23dSAndy Shevchenko  * @device:	pointer to &struct dma_device
11328ad342a8SLogan Gunthorpe  *
11338ad342a8SLogan Gunthorpe  * After calling this routine the structure should not be freed except in the
11348ad342a8SLogan Gunthorpe  * device_release() callback which will be called after
11358ad342a8SLogan Gunthorpe  * dma_async_device_unregister() is called and no further references are taken.
1136c13c8260SChris Leech  */
dma_async_device_register(struct dma_device * device)1137c13c8260SChris Leech int dma_async_device_register(struct dma_device *device)
1138c13c8260SChris Leech {
113908210094SDave Jiang 	int rc;
1140c13c8260SChris Leech 	struct dma_chan* chan;
1141c13c8260SChris Leech 
1142c13c8260SChris Leech 	if (!device)
1143c13c8260SChris Leech 		return -ENODEV;
1144c13c8260SChris Leech 
11457405f74bSDan Williams 	/* validate device routines */
11463eeb5156SVinod Koul 	if (!device->dev) {
11473eeb5156SVinod Koul 		pr_err("DMAdevice must have dev\n");
11483eeb5156SVinod Koul 		return -EIO;
11493eeb5156SVinod Koul 	}
11507405f74bSDan Williams 
1151dae7a589SLogan Gunthorpe 	device->owner = device->dev->driver->owner;
1152dae7a589SLogan Gunthorpe 
115381ebed8aSYajun Deng #define CHECK_CAP(_name, _type)								\
115481ebed8aSYajun Deng {											\
115581ebed8aSYajun Deng 	if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) {	\
115681ebed8aSYajun Deng 		dev_err(device->dev,							\
115781ebed8aSYajun Deng 			"Device claims capability %s, but op is not defined\n",		\
115881ebed8aSYajun Deng 			__stringify(_type));						\
115981ebed8aSYajun Deng 		return -EIO;								\
116081ebed8aSYajun Deng 	}										\
11613eeb5156SVinod Koul }
11623eeb5156SVinod Koul 
116381ebed8aSYajun Deng 	CHECK_CAP(dma_memcpy,      DMA_MEMCPY);
116481ebed8aSYajun Deng 	CHECK_CAP(dma_xor,         DMA_XOR);
116581ebed8aSYajun Deng 	CHECK_CAP(dma_xor_val,     DMA_XOR_VAL);
116681ebed8aSYajun Deng 	CHECK_CAP(dma_pq,          DMA_PQ);
116781ebed8aSYajun Deng 	CHECK_CAP(dma_pq_val,      DMA_PQ_VAL);
116881ebed8aSYajun Deng 	CHECK_CAP(dma_memset,      DMA_MEMSET);
116981ebed8aSYajun Deng 	CHECK_CAP(dma_interrupt,   DMA_INTERRUPT);
117081ebed8aSYajun Deng 	CHECK_CAP(dma_cyclic,      DMA_CYCLIC);
117181ebed8aSYajun Deng 	CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
11723eeb5156SVinod Koul 
117381ebed8aSYajun Deng #undef CHECK_CAP
11743eeb5156SVinod Koul 
11753eeb5156SVinod Koul 	if (!device->device_tx_status) {
11763eeb5156SVinod Koul 		dev_err(device->dev, "Device tx_status is not defined\n");
11773eeb5156SVinod Koul 		return -EIO;
11783eeb5156SVinod Koul 	}
11793eeb5156SVinod Koul 
11803eeb5156SVinod Koul 
11813eeb5156SVinod Koul 	if (!device->device_issue_pending) {
11823eeb5156SVinod Koul 		dev_err(device->dev, "Device issue_pending is not defined\n");
11833eeb5156SVinod Koul 		return -EIO;
11843eeb5156SVinod Koul 	}
11857405f74bSDan Williams 
11868ad342a8SLogan Gunthorpe 	if (!device->device_release)
1187f91da3bdSVinod Koul 		dev_dbg(device->dev,
11888ad342a8SLogan Gunthorpe 			 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
11898ad342a8SLogan Gunthorpe 
11908ad342a8SLogan Gunthorpe 	kref_init(&device->ref);
11918ad342a8SLogan Gunthorpe 
1192138f4c35SDan Williams 	/* note: this only matters in the
11935fc6d897SDan Williams 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1194138f4c35SDan Williams 	 */
1195138f4c35SDan Williams 	if (device_has_all_tx_types(device))
1196138f4c35SDan Williams 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1197138f4c35SDan Williams 
1198257b17caSDan Williams 	rc = get_dma_id(device);
1199d2fb0a04SDave Jiang 	if (rc != 0)
1200864498aaSDan Williams 		return rc;
1201c13c8260SChris Leech 
120208210094SDave Jiang 	ida_init(&device->chan_ida);
120308210094SDave Jiang 
1204c13c8260SChris Leech 	/* represent channels in sysfs. Probably want devs too */
1205c13c8260SChris Leech 	list_for_each_entry(chan, &device->channels, device_node) {
120608210094SDave Jiang 		rc = __dma_async_device_channel_register(device, chan);
1207d2fb0a04SDave Jiang 		if (rc < 0)
1208257b17caSDan Williams 			goto err_out;
120941d5e59cSDan Williams 	}
1210c13c8260SChris Leech 
1211c13c8260SChris Leech 	mutex_lock(&dma_list_mutex);
121259b5ec21SDan Williams 	/* take references on public channels */
121359b5ec21SDan Williams 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
12146f49a57aSDan Williams 		list_for_each_entry(chan, &device->channels, device_node) {
12156f49a57aSDan Williams 			/* if clients are already waiting for channels we need
12166f49a57aSDan Williams 			 * to take references on their behalf
12176f49a57aSDan Williams 			 */
12186f49a57aSDan Williams 			if (dma_chan_get(chan) == -ENODEV) {
12196f49a57aSDan Williams 				/* note we can only get here for the first
12206f49a57aSDan Williams 				 * channel as the remaining channels are
12216f49a57aSDan Williams 				 * guaranteed to get a reference
12226f49a57aSDan Williams 				 */
12236f49a57aSDan Williams 				rc = -ENODEV;
12246f49a57aSDan Williams 				mutex_unlock(&dma_list_mutex);
12256f49a57aSDan Williams 				goto err_out;
12266f49a57aSDan Williams 			}
12276f49a57aSDan Williams 		}
12282ba05622SDan Williams 	list_add_tail_rcu(&device->global_node, &dma_device_list);
12290f571515SAtsushi Nemoto 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
12300f571515SAtsushi Nemoto 		device->privatecnt++;	/* Always private */
1231bec08513SDan Williams 	dma_channel_rebalance();
1232c13c8260SChris Leech 	mutex_unlock(&dma_list_mutex);
1233c13c8260SChris Leech 
123426cf132dSPeter Ujfalusi 	dmaengine_debug_register(device);
123526cf132dSPeter Ujfalusi 
1236c13c8260SChris Leech 	return 0;
1237ff487fb7SJeff Garzik 
1238ff487fb7SJeff Garzik err_out:
1239257b17caSDan Williams 	/* if we never registered a channel just release the idr */
1240d2fb0a04SDave Jiang 	if (!device->chancnt) {
1241485258b4SMatthew Wilcox 		ida_free(&dma_ida, device->dev_id);
1242257b17caSDan Williams 		return rc;
1243257b17caSDan Williams 	}
1244257b17caSDan Williams 
1245ff487fb7SJeff Garzik 	list_for_each_entry(chan, &device->channels, device_node) {
1246ff487fb7SJeff Garzik 		if (chan->local == NULL)
1247ff487fb7SJeff Garzik 			continue;
124841d5e59cSDan Williams 		mutex_lock(&dma_list_mutex);
124941d5e59cSDan Williams 		chan->dev->chan = NULL;
125041d5e59cSDan Williams 		mutex_unlock(&dma_list_mutex);
125141d5e59cSDan Williams 		device_unregister(&chan->dev->device);
1252ff487fb7SJeff Garzik 		free_percpu(chan->local);
1253ff487fb7SJeff Garzik 	}
1254ff487fb7SJeff Garzik 	return rc;
1255c13c8260SChris Leech }
1256765e3d8aSDavid Brownell EXPORT_SYMBOL(dma_async_device_register);
1257c13c8260SChris Leech 
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device:	pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	/* _safe iteration: unregistering a channel removes it from the list */
	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	/* release the dev_id allocated at registration time */
	ida_free(&dma_ida, device->dev_id);
	/* drop a device reference; presumably pairs with the kref_init() done
	 * in dma_async_device_register() — final put triggers release */
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);
1286c13c8260SChris Leech 
/* devm action callback: void* adapter around dma_async_device_unregister() */
static void dmaenginem_async_device_unregister(void *device)
{
	dma_async_device_unregister(device);
}
1291f39b948dSHuang Shijie 
1292f39b948dSHuang Shijie /**
1293f39b948dSHuang Shijie  * dmaenginem_async_device_register - registers DMA devices found
12949872e23dSAndy Shevchenko  * @device:	pointer to &struct dma_device
1295f39b948dSHuang Shijie  *
1296f39b948dSHuang Shijie  * The operation is managed and will be undone on driver detach.
1297f39b948dSHuang Shijie  */
dmaenginem_async_device_register(struct dma_device * device)1298f39b948dSHuang Shijie int dmaenginem_async_device_register(struct dma_device *device)
1299f39b948dSHuang Shijie {
1300f39b948dSHuang Shijie 	int ret;
1301f39b948dSHuang Shijie 
1302f39b948dSHuang Shijie 	ret = dma_async_device_register(device);
1303a1beaa50SAndy Shevchenko 	if (ret)
1304f39b948dSHuang Shijie 		return ret;
1305a1beaa50SAndy Shevchenko 
130691e78b25SAndy Shevchenko 	return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
1307f39b948dSHuang Shijie }
1308f39b948dSHuang Shijie EXPORT_SYMBOL(dmaenginem_async_device_register);
1309f39b948dSHuang Shijie 
/*
 * Pools of preallocated dmaengine_unmap_data, bucketed by how many
 * dma_addr_t slots each entry can hold (see __get_unmap_pool()).
 */
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;	/* backing slab for this bucket size */
	const char *name;		/* slab name, e.g. "dmaengine-unmap-16" */
	mempool_t *pool;		/* mempool built on top of @cache */
	size_t size;			/* max number of addresses per entry */
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	/* larger buckets are only needed by the RAID offload paths */
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};
132645c463aeSDan Williams 
__get_unmap_pool(int nr)132745c463aeSDan Williams static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
13287405f74bSDan Williams {
132945c463aeSDan Williams 	int order = get_count_order(nr);
13307405f74bSDan Williams 
133145c463aeSDan Williams 	switch (order) {
133245c463aeSDan Williams 	case 0 ... 1:
133345c463aeSDan Williams 		return &unmap_pool[0];
133423f963e9SMatthias Kaehlcke #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
133545c463aeSDan Williams 	case 2 ... 4:
133645c463aeSDan Williams 		return &unmap_pool[1];
133745c463aeSDan Williams 	case 5 ... 7:
133845c463aeSDan Williams 		return &unmap_pool[2];
133945c463aeSDan Williams 	case 8:
134045c463aeSDan Williams 		return &unmap_pool[3];
134123f963e9SMatthias Kaehlcke #endif
134245c463aeSDan Williams 	default:
134345c463aeSDan Williams 		BUG();
134445c463aeSDan Williams 		return NULL;
134545c463aeSDan Williams 	}
134645c463aeSDan Williams }
13470036731cSDan Williams 
dmaengine_unmap(struct kref * kref)134845c463aeSDan Williams static void dmaengine_unmap(struct kref *kref)
134945c463aeSDan Williams {
135045c463aeSDan Williams 	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
135145c463aeSDan Williams 	struct device *dev = unmap->dev;
135245c463aeSDan Williams 	int cnt, i;
135345c463aeSDan Williams 
135445c463aeSDan Williams 	cnt = unmap->to_cnt;
135545c463aeSDan Williams 	for (i = 0; i < cnt; i++)
135645c463aeSDan Williams 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
135745c463aeSDan Williams 			       DMA_TO_DEVICE);
135845c463aeSDan Williams 	cnt += unmap->from_cnt;
135945c463aeSDan Williams 	for (; i < cnt; i++)
136045c463aeSDan Williams 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
136145c463aeSDan Williams 			       DMA_FROM_DEVICE);
136245c463aeSDan Williams 	cnt += unmap->bidi_cnt;
13637476bd79SDan Williams 	for (; i < cnt; i++) {
13647476bd79SDan Williams 		if (unmap->addr[i] == 0)
13657476bd79SDan Williams 			continue;
136645c463aeSDan Williams 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
136745c463aeSDan Williams 			       DMA_BIDIRECTIONAL);
13687476bd79SDan Williams 	}
1369c1f43dd9SXuelin Shi 	cnt = unmap->map_cnt;
137045c463aeSDan Williams 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
137145c463aeSDan Williams }
137245c463aeSDan Williams 
dmaengine_unmap_put(struct dmaengine_unmap_data * unmap)137345c463aeSDan Williams void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
137445c463aeSDan Williams {
137545c463aeSDan Williams 	if (unmap)
137645c463aeSDan Williams 		kref_put(&unmap->kref, dmaengine_unmap);
137745c463aeSDan Williams }
137845c463aeSDan Williams EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
137945c463aeSDan Williams 
dmaengine_destroy_unmap_pool(void)138045c463aeSDan Williams static void dmaengine_destroy_unmap_pool(void)
138145c463aeSDan Williams {
138245c463aeSDan Williams 	int i;
138345c463aeSDan Williams 
138445c463aeSDan Williams 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
138545c463aeSDan Williams 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
138645c463aeSDan Williams 
138745c463aeSDan Williams 		mempool_destroy(p->pool);
138845c463aeSDan Williams 		p->pool = NULL;
138945c463aeSDan Williams 		kmem_cache_destroy(p->cache);
139045c463aeSDan Williams 		p->cache = NULL;
139145c463aeSDan Williams 	}
139245c463aeSDan Williams }
139345c463aeSDan Williams 
dmaengine_init_unmap_pool(void)139445c463aeSDan Williams static int __init dmaengine_init_unmap_pool(void)
139545c463aeSDan Williams {
139645c463aeSDan Williams 	int i;
139745c463aeSDan Williams 
139845c463aeSDan Williams 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
139945c463aeSDan Williams 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
140045c463aeSDan Williams 		size_t size;
140145c463aeSDan Williams 
140245c463aeSDan Williams 		size = sizeof(struct dmaengine_unmap_data) +
140345c463aeSDan Williams 		       sizeof(dma_addr_t) * p->size;
140445c463aeSDan Williams 
140545c463aeSDan Williams 		p->cache = kmem_cache_create(p->name, size, 0,
140645c463aeSDan Williams 					     SLAB_HWCACHE_ALIGN, NULL);
140745c463aeSDan Williams 		if (!p->cache)
140845c463aeSDan Williams 			break;
140945c463aeSDan Williams 		p->pool = mempool_create_slab_pool(1, p->cache);
141045c463aeSDan Williams 		if (!p->pool)
141145c463aeSDan Williams 			break;
141245c463aeSDan Williams 	}
141345c463aeSDan Williams 
141445c463aeSDan Williams 	if (i == ARRAY_SIZE(unmap_pool))
141545c463aeSDan Williams 		return 0;
141645c463aeSDan Williams 
141745c463aeSDan Williams 	dmaengine_destroy_unmap_pool();
14187405f74bSDan Williams 	return -ENOMEM;
14190036731cSDan Williams }
14207405f74bSDan Williams 
142189716462SDan Williams struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device * dev,int nr,gfp_t flags)142245c463aeSDan Williams dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
14237405f74bSDan Williams {
142445c463aeSDan Williams 	struct dmaengine_unmap_data *unmap;
14257405f74bSDan Williams 
142645c463aeSDan Williams 	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
142745c463aeSDan Williams 	if (!unmap)
142845c463aeSDan Williams 		return NULL;
14290036731cSDan Williams 
143045c463aeSDan Williams 	memset(unmap, 0, sizeof(*unmap));
143145c463aeSDan Williams 	kref_init(&unmap->kref);
143245c463aeSDan Williams 	unmap->dev = dev;
1433c1f43dd9SXuelin Shi 	unmap->map_cnt = nr;
143445c463aeSDan Williams 
143545c463aeSDan Williams 	return unmap;
14360036731cSDan Williams }
143789716462SDan Williams EXPORT_SYMBOL(dmaengine_get_unmap_data);
14387405f74bSDan Williams 
/**
 * dma_async_tx_descriptor_init - initialize the common fields of an
 *	async transaction descriptor
 * @tx:		descriptor to initialize
 * @chan:	channel the descriptor will be issued on
 */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	/* the descriptor lock only exists when channel switching is enabled */
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
14487405f74bSDan Williams 
desc_check_and_set_metadata_mode(struct dma_async_tx_descriptor * desc,enum dma_desc_metadata_mode mode)14494db8fd32SPeter Ujfalusi static inline int desc_check_and_set_metadata_mode(
14504db8fd32SPeter Ujfalusi 	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
14514db8fd32SPeter Ujfalusi {
14524db8fd32SPeter Ujfalusi 	/* Make sure that the metadata mode is not mixed */
14534db8fd32SPeter Ujfalusi 	if (!desc->desc_metadata_mode) {
14544db8fd32SPeter Ujfalusi 		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
14554db8fd32SPeter Ujfalusi 			desc->desc_metadata_mode = mode;
14564db8fd32SPeter Ujfalusi 		else
14574db8fd32SPeter Ujfalusi 			return -ENOTSUPP;
14584db8fd32SPeter Ujfalusi 	} else if (desc->desc_metadata_mode != mode) {
14594db8fd32SPeter Ujfalusi 		return -EINVAL;
14604db8fd32SPeter Ujfalusi 	}
14614db8fd32SPeter Ujfalusi 
14624db8fd32SPeter Ujfalusi 	return 0;
14634db8fd32SPeter Ujfalusi }
14644db8fd32SPeter Ujfalusi 
/**
 * dmaengine_desc_attach_metadata - attach a client metadata buffer
 * @desc:	descriptor to attach the buffer to
 * @data:	metadata buffer provided by the client
 * @len:	length of the buffer
 *
 * Selects (or validates) DESC_METADATA_CLIENT mode, then forwards to the
 * provider's attach callback.  Returns -ENOTSUPP when the provider does
 * not implement it.
 */
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (desc->metadata_ops && desc->metadata_ops->attach)
		return desc->metadata_ops->attach(desc, data, len);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
14834db8fd32SPeter Ujfalusi 
/**
 * dmaengine_desc_get_metadata_ptr - get a pointer to engine metadata
 * @desc:	descriptor to query
 * @payload_len: filled with the current payload length
 * @max_len:	filled with the maximum usable length
 *
 * Selects (or validates) DESC_METADATA_ENGINE mode, then forwards to the
 * provider's get_ptr callback.  Errors are returned as ERR_PTR() values.
 */
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (desc->metadata_ops && desc->metadata_ops->get_ptr)
		return desc->metadata_ops->get_ptr(desc, payload_len, max_len);

	return ERR_PTR(-ENOTSUPP);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
15024db8fd32SPeter Ujfalusi 
/**
 * dmaengine_desc_set_metadata_len - set the valid engine metadata length
 * @desc:	descriptor to update
 * @payload_len: number of valid metadata bytes
 *
 * Selects (or validates) DESC_METADATA_ENGINE mode, then forwards to the
 * provider's set_len callback.  Returns -ENOTSUPP when the provider does
 * not implement it.
 */
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (desc->metadata_ops && desc->metadata_ops->set_len)
		return desc->metadata_ops->set_len(desc, payload_len);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
15214db8fd32SPeter Ujfalusi 
15229872e23dSAndy Shevchenko /**
15239872e23dSAndy Shevchenko  * dma_wait_for_async_tx - spin wait for a transaction to complete
152407f2211eSDan Williams  * @tx:		in-flight transaction to wait on
152507f2211eSDan Williams  */
152607f2211eSDan Williams enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor * tx)152707f2211eSDan Williams dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
152807f2211eSDan Williams {
152995475e57SDan Williams 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
153007f2211eSDan Williams 
153107f2211eSDan Williams 	if (!tx)
1532adfedd9aSVinod Koul 		return DMA_COMPLETE;
153307f2211eSDan Williams 
153495475e57SDan Williams 	while (tx->cookie == -EBUSY) {
153595475e57SDan Williams 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1536ef859312SJarkko Nikula 			dev_err(tx->chan->device->dev,
1537ef859312SJarkko Nikula 				"%s timeout waiting for descriptor submission\n",
153895475e57SDan Williams 				__func__);
153995475e57SDan Williams 			return DMA_ERROR;
154095475e57SDan Williams 		}
154107f2211eSDan Williams 		cpu_relax();
154295475e57SDan Williams 	}
154395475e57SDan Williams 	return dma_sync_wait(tx->chan, tx->cookie);
154407f2211eSDan Williams }
154507f2211eSDan Williams EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
154607f2211eSDan Williams 
/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx:		transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	/* nothing chained after this transaction */
	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		/* peek at the follow-on dependency while holding dep's lock */
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		/* submitted after dropping dep's lock */
		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
158707f2211eSDan Williams 
dma_bus_init(void)1588c13c8260SChris Leech static int __init dma_bus_init(void)
1589c13c8260SChris Leech {
159045c463aeSDan Williams 	int err = dmaengine_init_unmap_pool();
159145c463aeSDan Williams 
159245c463aeSDan Williams 	if (err)
159345c463aeSDan Williams 		return err;
1594e937cc1dSPeter Ujfalusi 
1595e937cc1dSPeter Ujfalusi 	err = class_register(&dma_devclass);
1596e937cc1dSPeter Ujfalusi 	if (!err)
1597e937cc1dSPeter Ujfalusi 		dmaengine_debugfs_init();
1598e937cc1dSPeter Ujfalusi 
1599e937cc1dSPeter Ujfalusi 	return err;
1600c13c8260SChris Leech }
1601652afc27SDan Williams arch_initcall(dma_bus_init);
1602