/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
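
/*
 * Example client flow for the mem-to-mem API described above (an
 * illustrative sketch, not part of this file): error handling is elided,
 * and tx/cookie/dma_dest/dma_src/len stand for a driver-owned descriptor
 * pointer, a dma_cookie_t, and already DMA-mapped buffers of length len.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dma_dest,
 *							  dma_src, len, 0);
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		dma_sync_wait(chan, cookie);
 *	}
 *	dmaengine_put();
 */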

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
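
/*
 * Example of a non-blocking alternative to dma_sync_wait() (a sketch;
 * "cookie" is assumed to come from a prior dmaengine_submit(), and
 * process_results() is a hypothetical completion handler):
 *
 *	dma_async_issue_pending(chan);
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_COMPLETE)
 *		process_results();
 */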

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node
 *	as the cpu
 * @chan: channel to test
 * @cpu: cpu index to test against
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether the device reports the generic slave capabilities;
	 * if not, it does not support any kind of slave capability
	 * reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	caps->cmd_pause = !!device->device_pause;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
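
/*
 * Example: querying capabilities before configuring a slave channel (a
 * sketch; "chan" is assumed to come from dma_request_slave_channel()):
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		dmaengine_pause(chan);	(only safe when cmd_pause is set)
 */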

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else {
		chan = NULL;
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
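
/*
 * Example: a provider's of_dma translation callback handing out one
 * specific channel via dma_get_slave_channel() (a sketch; the xxx_*
 * names and the chans[] lookup are hypothetical):
 *
 *	static struct dma_chan *xxx_of_xlate(struct of_phandle_args *spec,
 *					     struct of_dma *ofdma)
 *	{
 *		struct xxx_dma *xdev = ofdma->of_dma_data;
 *		unsigned int id = spec->args[0];
 *
 *		if (id >= xdev->nr_channels)
 *			return NULL;
 *		return dma_get_slave_channel(&xdev->chans[id].common);
 *	}
 */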

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to accept or reject candidate channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err) {
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			} else {
				break;
			}
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
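
/*
 * Example: requesting an exclusive channel through the
 * dma_request_channel() wrapper with a filter callback (a sketch;
 * "xxx_filter_fn" and "match" are hypothetical):
 *
 *	static bool xxx_filter_fn(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, xxx_filter_fn, match);
 *	if (chan) {
 *		(use the channel, then hand it back)
 *		dma_release_channel(chan);
 *	}
 */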

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);

	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
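
/*
 * Example: a typical slave client using this API (a sketch; the channel
 * name "tx" and the fifo_phys/buf_dma/len values are hypothetical):
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (chan) {
 *		struct dma_slave_config cfg = {
 *			.direction = DMA_MEM_TO_DEV,
 *			.dst_addr = fifo_phys,
 *			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		};
 *
 *		dmaengine_slave_config(chan, &cfg);
 *		desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *						   DMA_MEM_TO_DEV,
 *						   DMA_PREP_INTERRUPT);
 *	}
 */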

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err) {
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			}
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - register a DMA device and its channels
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
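
/*
 * Example: a minimal provider-side registration (a sketch; the xxx_*
 * callbacks and the xdev structure are hypothetical, and most required
 * channel setup is elided):
 *
 *	dma_cap_set(DMA_MEMCPY, xdev->ddev.cap_mask);
 *	xdev->ddev.device_prep_dma_memcpy = xxx_prep_memcpy;
 *	xdev->ddev.device_tx_status = xxx_tx_status;
 *	xdev->ddev.device_issue_pending = xxx_issue_pending;
 *	xdev->ddev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&xdev->ddev.channels);
 *	xdev->chan.device = &xdev->ddev;
 *	list_add_tail(&xdev->chan.device_node, &xdev->ddev.channels);
 *	rc = dma_async_device_register(&xdev->ddev);
 */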

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	#endif
	default:
		/* the larger pools only exist when RAID offload is
		 * enabled; anything else is a caller bug
		 */
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap =
		container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
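
/*
 * Example: mapping one source and one destination page for an offloaded
 * copy (a sketch; error handling is elided and "dev", the pages, offsets
 * and "len" are hypothetical):
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_pg, src_off, len,
 *				      DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_pg, dst_off, len,
 *				      DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	(attach unmap to a descriptor via dma_set_unmap() and submit it)
 *	dmaengine_unmap_put(unmap);
 */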

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting until a channel switch is detected; in that case
	 * we will be called again as a result of processing the interrupt
	 * from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
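
/*
 * Example of where a provider typically calls dma_run_dependencies() (a
 * sketch): in its descriptor clean-up path, once a transaction "tx" has
 * completed in hardware:
 *
 *	dma_cookie_complete(tx);
 *	dma_descriptor_unmap(tx);
 *	if (tx->callback)
 *		tx->callback(tx->callback_param);
 *	dma_run_dependencies(tx);
 */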

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);