xref: /openbmc/linux/drivers/dma/dmaengine.c (revision b34081f1)
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */

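/*
 * Example: a minimal sketch of the public client pattern described above
 * (hypothetical client code, not part of this file).  A client registers
 * interest in channels, looks up a memcpy-capable channel, and drops its
 * reference when done:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	dmaengine_put();
 */
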
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
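
/*
 * Example: dma_sync_wait() busy-waits (up to 5 seconds) for a submitted
 * transaction to complete.  A hypothetical caller that has just submitted
 * a copy and needs the result before proceeding might do:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		goto fail;	// timed out or channel reported an error
 */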

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
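
/*
 * Example: a minimal sketch of requesting an exclusive channel through the
 * dma_request_channel() wrapper (hypothetical client code; my_filter and
 * my_dev are illustrative, not part of this file):
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		// e.g. accept only channels belonging to a specific device
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		// ... use the channel exclusively ...
 *		dma_release_channel(chan);
 *	}
 */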

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
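
/*
 * Example: a minimal sketch of a slave-DMA client obtaining its channel by
 * name (hypothetical driver code; "tx" is an illustrative channel name that
 * would have to match the device's DT/ACPI description):
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;	// fall back to PIO or defer probing
 */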

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
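
/*
 * Example: a minimal sketch of the provider side (hypothetical driver code;
 * the mydrv_* callbacks and mydev are illustrative).  A driver declares its
 * capabilities, fills in the callbacks validated above, links its channels
 * onto device->channels, and then registers:
 *
 *	dma_cap_set(DMA_MEMCPY, mydev->dma.cap_mask);
 *	mydev->dma.device_alloc_chan_resources = mydrv_alloc_chan_resources;
 *	mydev->dma.device_free_chan_resources = mydrv_free_chan_resources;
 *	mydev->dma.device_prep_dma_memcpy = mydrv_prep_memcpy;
 *	mydev->dma.device_tx_status = mydrv_tx_status;
 *	mydev->dma.device_issue_pending = mydrv_issue_pending;
 *	mydev->dma.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&mydev->dma.channels);
 *	list_add_tail(&mydev->chan.device_node, &mydev->dma.channels);
 *	rc = dma_async_device_register(&mydev->dma);
 */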

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
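
/*
 * Example: drivers call this from their descriptor setup path before handing
 * a descriptor back from a prep routine (hypothetical driver code;
 * mydrv_tx_submit is illustrative):
 *
 *	dma_async_tx_descriptor_init(&desc->txd, chan);
 *	desc->txd.tx_submit = mydrv_tx_submit;
 */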

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
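
/*
 * Example: a driver typically invokes this from its descriptor cleanup /
 * completion path, after running the client callback, so that queued
 * dependent operations get submitted (hypothetical driver code):
 *
 *	if (txd->callback)
 *		txd->callback(txd->callback_param);
 *	dma_run_dependencies(txd);
 */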

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);