xref: /openbmc/linux/drivers/dma/dmaengine.c (revision f42b3800)
1 /*
2  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the Free
6  * Software Foundation; either version 2 of the License, or (at your option)
7  * any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59
16  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
17  *
18  * The full GNU General Public License is included in this distribution in the
19  * file called COPYING.
20  */
21 
22 /*
23  * This code implements the DMA subsystem. It provides a HW-neutral interface
24  * for other kernel code to use asynchronous memory copy capabilities,
25  * if present, and allows different HW DMA drivers to register as providing
26  * this capability.
27  *
28  * Because we are accelerating what is already a relatively fast operation,
29  * the code goes to great lengths to avoid additional overhead, such as
30  * locking.
31  *
32  * LOCKING:
33  *
34  * The subsystem keeps two global lists, dma_device_list and dma_client_list.
35  * Both of these are protected by a mutex, dma_list_mutex.
36  *
37  * Each device has a channels list, which runs unlocked but is never modified
38  * once the device is registered; it is simply set up by the driver.
39  *
40  * Each client is responsible for keeping track of the channels it uses.  See
41  * the definition of dma_event_callback in dmaengine.h.
42  *
43  * Each device has a kref, which is initialized to 1 when the device is
44  * registered. A kref_get is done for each device registered.  When the
45  * device is released, the corresponding kref_put is done in the release
46  * method. Every time one of the device's channels is allocated to a client,
47  * a kref_get occurs.  When the channel is freed, the corresponding kref_put
48  * happens. The device's release function does a completion, so
49  * dma_async_device_unregister issues a remove event and a device_unregister
50  * for each channel, does a kref_put for its initial reference, and then
51  * waits on the completion for all remaining references to be dropped.
52  *
53  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
54  * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
55  * signals that it wants to use a channel, and dma_chan_put is called when
56  * a channel is removed or a client using it is unregistered.  A client can
57  * take extra references per outstanding transaction, as is the case with
58  * the NET DMA client.  The release function does a kref_put on the device.
59  *	-ChrisL, DanW
60  */
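
/*
 * Illustrative sketch (not part of this driver): how a minimal client might
 * hook into the interface described above.  The names my_chan,
 * my_event_callback and my_client are hypothetical, and DMA_DUP ("no
 * action") is assumed to be defined in dmaengine.h next to DMA_ACK and
 * DMA_NAK.  Returning DMA_ACK from the callback is what takes the channel
 * reference on DMA_RESOURCE_AVAILABLE and releases it on
 * DMA_RESOURCE_REMOVED.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static enum dma_state_client
 *	my_event_callback(struct dma_client *client, struct dma_chan *chan,
 *			  enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			my_chan = chan;
 *			return DMA_ACK;
 *		case DMA_RESOURCE_REMOVED:
 *			my_chan = NULL;
 *			return DMA_ACK;
 *		default:
 *			return DMA_DUP;
 *		}
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback	= my_event_callback,
 *	};
 *
 * and then, typically from the client's init path:
 *
 *	dma_cap_zero(my_client.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */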
61 
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/mm.h>
65 #include <linux/device.h>
66 #include <linux/dmaengine.h>
67 #include <linux/hardirq.h>
68 #include <linux/spinlock.h>
69 #include <linux/percpu.h>
70 #include <linux/rcupdate.h>
71 #include <linux/mutex.h>
72 #include <linux/jiffies.h>
73 
74 static DEFINE_MUTEX(dma_list_mutex);
75 static LIST_HEAD(dma_device_list);
76 static LIST_HEAD(dma_client_list);
77 
78 /* --- sysfs implementation --- */
79 
80 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
81 {
82 	struct dma_chan *chan = to_dma_chan(dev);
83 	unsigned long count = 0;
84 	int i;
85 
86 	for_each_possible_cpu(i)
87 		count += per_cpu_ptr(chan->local, i)->memcpy_count;
88 
89 	return sprintf(buf, "%lu\n", count);
90 }
91 
92 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
93 				      char *buf)
94 {
95 	struct dma_chan *chan = to_dma_chan(dev);
96 	unsigned long count = 0;
97 	int i;
98 
99 	for_each_possible_cpu(i)
100 		count += per_cpu_ptr(chan->local, i)->bytes_transferred;
101 
102 	return sprintf(buf, "%lu\n", count);
103 }
104 
105 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
106 {
107 	struct dma_chan *chan = to_dma_chan(dev);
108 	int in_use = 0;
109 
110 	if (unlikely(chan->slow_ref) &&
111 		atomic_read(&chan->refcount.refcount) > 1)
112 		in_use = 1;
113 	else {
114 		if (local_read(&(per_cpu_ptr(chan->local,
115 			get_cpu())->refcount)) > 0)
116 			in_use = 1;
117 		put_cpu();
118 	}
119 
120 	return sprintf(buf, "%d\n", in_use);
121 }
122 
123 static struct device_attribute dma_attrs[] = {
124 	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
125 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
126 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
127 	__ATTR_NULL
128 };
129 
130 static void dma_async_device_cleanup(struct kref *kref);
131 
132 static void dma_dev_release(struct device *dev)
133 {
134 	struct dma_chan *chan = to_dma_chan(dev);
135 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
136 }
137 
138 static struct class dma_devclass = {
139 	.name		= "dma",
140 	.dev_attrs	= dma_attrs,
141 	.dev_release	= dma_dev_release,
142 };
143 
144 /* --- client and device registration --- */
145 
146 #define dma_chan_satisfies_mask(chan, mask) \
147 	__dma_chan_satisfies_mask((chan), &(mask))
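/*
 * Returns non-zero iff every capability requested in @want is advertised by
 * @chan's device, i.e. @want is a subset of the device's cap_mask.
 */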
148 static int
149 __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
150 {
151 	dma_cap_mask_t has;
152 
153 	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
154 		DMA_TX_TYPE_END);
155 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
156 }
157 
158 /**
159  * dma_client_chan_alloc - try to allocate channels to a client
160  * @client: &dma_client
161  *
162  * Called with dma_list_mutex held.
163  */
164 static void dma_client_chan_alloc(struct dma_client *client)
165 {
166 	struct dma_device *device;
167 	struct dma_chan *chan;
168 	int desc;	/* allocated descriptor count */
169 	enum dma_state_client ack;
170 
171 	/* Find a channel */
172 	list_for_each_entry(device, &dma_device_list, global_node)
173 		list_for_each_entry(chan, &device->channels, device_node) {
174 			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
175 				continue;
176 
177 			desc = chan->device->device_alloc_chan_resources(chan);
178 			if (desc >= 0) {
179 				ack = client->event_callback(client,
180 						chan,
181 						DMA_RESOURCE_AVAILABLE);
182 
183 				/* we are done once this client rejects
184 				 * an available resource
185 				 */
186 				if (ack == DMA_ACK)
187 					dma_chan_get(chan);
188 				else if (ack == DMA_NAK)
189 					return;
190 			}
191 		}
192 }
193 
194 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
195 {
196 	enum dma_status status;
197 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
198 
199 	dma_async_issue_pending(chan);
200 	do {
201 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
202 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
203 			printk(KERN_ERR "dma_sync_wait_timeout!\n");
204 			return DMA_ERROR;
205 		}
206 	} while (status == DMA_IN_PROGRESS);
207 
208 	return status;
209 }
210 EXPORT_SYMBOL(dma_sync_wait);
211 
212 /**
213  * dma_chan_cleanup - release a DMA channel's resources
214  * @kref: kernel reference structure that contains the DMA channel device
215  */
216 void dma_chan_cleanup(struct kref *kref)
217 {
218 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
219 	chan->device->device_free_chan_resources(chan);
220 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
221 }
222 EXPORT_SYMBOL(dma_chan_cleanup);
223 
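/*
 * Teardown half of the per-channel "bigref" described at the top of this
 * file: dma_chan_release() biases the central kref by 0x7FFFFFFF and sets
 * chan->slow_ref, so later dma_chan_get()/dma_chan_put() calls fall back to
 * the kref instead of the per-cpu counters.  After an RCU grace period,
 * dma_chan_free_rcu() removes that bias again, minus whatever the per-cpu
 * counters had accumulated, and drops the initial reference; once every
 * remaining reference is put, dma_chan_cleanup() frees the channel's
 * resources and drops the device reference.
 */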
224 static void dma_chan_free_rcu(struct rcu_head *rcu)
225 {
226 	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
227 	int bias = 0x7FFFFFFF;
228 	int i;
229 	for_each_possible_cpu(i)
230 		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
231 	atomic_sub(bias, &chan->refcount.refcount);
232 	kref_put(&chan->refcount, dma_chan_cleanup);
233 }
234 
235 static void dma_chan_release(struct dma_chan *chan)
236 {
237 	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
238 	chan->slow_ref = 1;
239 	call_rcu(&chan->rcu, dma_chan_free_rcu);
240 }
241 
242 /**
243  * dma_clients_notify_available - broadcast available channels to the clients
244  */
245 static void dma_clients_notify_available(void)
246 {
247 	struct dma_client *client;
248 
249 	mutex_lock(&dma_list_mutex);
250 
251 	list_for_each_entry(client, &dma_client_list, global_node)
252 		dma_client_chan_alloc(client);
253 
254 	mutex_unlock(&dma_list_mutex);
255 }
256 
257 /**
258  * dma_clients_notify_removed - tell the clients that a channel is going away
259  * @chan: channel on its way out
260  */
261 static void dma_clients_notify_removed(struct dma_chan *chan)
262 {
263 	struct dma_client *client;
264 	enum dma_state_client ack;
265 
266 	mutex_lock(&dma_list_mutex);
267 
268 	list_for_each_entry(client, &dma_client_list, global_node) {
269 		ack = client->event_callback(client, chan,
270 				DMA_RESOURCE_REMOVED);
271 
272 		/* this client was holding a reference to the channel,
273 		 * so drop it
274 		 */
275 		if (ack == DMA_ACK)
276 			dma_chan_put(chan);
277 	}
278 
279 	mutex_unlock(&dma_list_mutex);
280 }
281 
282 /**
283  * dma_async_client_register - register a &dma_client
284  * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
285  */
286 void dma_async_client_register(struct dma_client *client)
287 {
288 	mutex_lock(&dma_list_mutex);
289 	list_add_tail(&client->global_node, &dma_client_list);
290 	mutex_unlock(&dma_list_mutex);
291 }
292 EXPORT_SYMBOL(dma_async_client_register);
293 
294 /**
295  * dma_async_client_unregister - unregister a client and free the &dma_client
296  * @client: &dma_client to free
297  *
298  * Force frees any allocated DMA channels, frees the &dma_client memory
299  */
300 void dma_async_client_unregister(struct dma_client *client)
301 {
302 	struct dma_device *device;
303 	struct dma_chan *chan;
304 	enum dma_state_client ack;
305 
306 	if (!client)
307 		return;
308 
309 	mutex_lock(&dma_list_mutex);
310 	/* free all channels the client is holding */
311 	list_for_each_entry(device, &dma_device_list, global_node)
312 		list_for_each_entry(chan, &device->channels, device_node) {
313 			ack = client->event_callback(client, chan,
314 				DMA_RESOURCE_REMOVED);
315 
316 			if (ack == DMA_ACK)
317 				dma_chan_put(chan);
318 		}
319 
320 	list_del(&client->global_node);
321 	mutex_unlock(&dma_list_mutex);
322 }
323 EXPORT_SYMBOL(dma_async_client_unregister);
324 
325 /**
326  * dma_async_client_chan_request - send the client all available channels
327  * that satisfy its capability mask
328  * @client: requester
329  */
330 void dma_async_client_chan_request(struct dma_client *client)
331 {
332 	mutex_lock(&dma_list_mutex);
333 	dma_client_chan_alloc(client);
334 	mutex_unlock(&dma_list_mutex);
335 }
336 EXPORT_SYMBOL(dma_async_client_chan_request);
337 
338 /**
339  * dma_async_device_register - register a DMA device and its channels
340  * @device: &dma_device
341  */
342 int dma_async_device_register(struct dma_device *device)
343 {
344 	static int id;
345 	int chancnt = 0, rc;
346 	struct dma_chan *chan;
347 
348 	if (!device)
349 		return -ENODEV;
350 
351 	/* validate device routines */
352 	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
353 		!device->device_prep_dma_memcpy);
354 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
355 		!device->device_prep_dma_xor);
356 	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
357 		!device->device_prep_dma_zero_sum);
358 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
359 		!device->device_prep_dma_memset);
360 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
361 		!device->device_prep_dma_interrupt);
362 
363 	BUG_ON(!device->device_alloc_chan_resources);
364 	BUG_ON(!device->device_free_chan_resources);
365 	BUG_ON(!device->device_is_tx_complete);
366 	BUG_ON(!device->device_issue_pending);
367 	BUG_ON(!device->dev);
368 
369 	init_completion(&device->done);
370 	kref_init(&device->refcount);
371 	device->dev_id = id++;
372 
373 	/* represent channels in sysfs. Probably want devs too */
374 	list_for_each_entry(chan, &device->channels, device_node) {
375 		chan->local = alloc_percpu(typeof(*chan->local));
376 		if (chan->local == NULL)
377 			continue;
378 
379 		chan->chan_id = chancnt++;
380 		chan->dev.class = &dma_devclass;
381 		chan->dev.parent = NULL;
382 		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
383 		         device->dev_id, chan->chan_id);
384 
385 		rc = device_register(&chan->dev);
386 		if (rc) {
387 			chancnt--;
388 			free_percpu(chan->local);
389 			chan->local = NULL;
390 			goto err_out;
391 		}
392 
393 		/* One for the channel, one for the class device */
394 		kref_get(&device->refcount);
395 		kref_get(&device->refcount);
396 		kref_init(&chan->refcount);
397 		chan->slow_ref = 0;
398 		INIT_RCU_HEAD(&chan->rcu);
399 	}
400 
401 	mutex_lock(&dma_list_mutex);
402 	list_add_tail(&device->global_node, &dma_device_list);
403 	mutex_unlock(&dma_list_mutex);
404 
405 	dma_clients_notify_available();
406 
407 	return 0;
408 
409 err_out:
410 	list_for_each_entry(chan, &device->channels, device_node) {
411 		if (chan->local == NULL)
412 			continue;
413 		kref_put(&device->refcount, dma_async_device_cleanup);
414 		device_unregister(&chan->dev);
415 		chancnt--;
416 		free_percpu(chan->local);
417 	}
418 	return rc;
419 }
420 EXPORT_SYMBOL(dma_async_device_register);
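
/*
 * Illustrative sketch (not part of this file): what a provider driver is
 * expected to do before calling dma_async_device_register().  The names
 * my_dev, my_chan, pdev and the my_* callbacks are hypothetical; my_dev
 * points at a driver-allocated struct dma_device and my_chan at one of its
 * driver-allocated channels.
 *
 *	dma_cap_set(DMA_MEMCPY, my_dev->cap_mask);
 *	my_dev->device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dev->device_free_chan_resources = my_free_chan_resources;
 *	my_dev->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	my_dev->device_is_tx_complete = my_is_tx_complete;
 *	my_dev->device_issue_pending = my_issue_pending;
 *	my_dev->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&my_dev->channels);
 *	my_chan->device = my_dev;
 *	list_add_tail(&my_chan->device_node, &my_dev->channels);
 *
 *	err = dma_async_device_register(my_dev);
 */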
421 
422 /**
423  * dma_async_device_cleanup - function called when all references are released
424  * @kref: kernel reference object
425  */
426 static void dma_async_device_cleanup(struct kref *kref)
427 {
428 	struct dma_device *device;
429 
430 	device = container_of(kref, struct dma_device, refcount);
431 	complete(&device->done);
432 }
433 
434 /**
435  * dma_async_device_unregister - unregister a DMA device and its channels
436  * @device: &dma_device
437  */
438 void dma_async_device_unregister(struct dma_device *device)
439 {
440 	struct dma_chan *chan;
441 
442 	mutex_lock(&dma_list_mutex);
443 	list_del(&device->global_node);
444 	mutex_unlock(&dma_list_mutex);
445 
446 	list_for_each_entry(chan, &device->channels, device_node) {
447 		dma_clients_notify_removed(chan);
448 		device_unregister(&chan->dev);
449 		dma_chan_release(chan);
450 	}
451 
452 	kref_put(&device->refcount, dma_async_device_cleanup);
453 	wait_for_completion(&device->done);
454 }
455 EXPORT_SYMBOL(dma_async_device_unregister);
456 
457 /**
458  * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
459  * @chan: DMA channel to offload copy to
460  * @dest: destination address (virtual)
461  * @src: source address (virtual)
462  * @len: length
463  *
464  * Both @dest and @src must be mappable to a bus address according to the
465  * DMA mapping API rules for streaming mappings.
466  * Both @dest and @src must stay memory resident (kernel memory or locked
467  * user space pages).
468  */
469 dma_cookie_t
470 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
471 			void *src, size_t len)
472 {
473 	struct dma_device *dev = chan->device;
474 	struct dma_async_tx_descriptor *tx;
475 	dma_addr_t dma_dest, dma_src;
476 	dma_cookie_t cookie;
477 	int cpu;
478 
479 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
480 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
481 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
482 					 DMA_CTRL_ACK);
483 
484 	if (!tx) {
485 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
486 		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
487 		return -ENOMEM;
488 	}
489 
490 	tx->callback = NULL;
491 	cookie = tx->tx_submit(tx);
492 
493 	cpu = get_cpu();
494 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
495 	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
496 	put_cpu();
497 
498 	return cookie;
499 }
500 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
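
/*
 * Illustrative sketch (not part of this file): submitting a copy on a
 * channel obtained through the client callback and waiting for it
 * synchronously.  chan, dst, src and len are assumed to be provided by the
 * caller; a negative cookie means the copy could not be set up (this file
 * returns -ENOMEM in that case), and DMA_SUCCESS is assumed to be the
 * completed member of enum dma_status alongside DMA_IN_PROGRESS and
 * DMA_ERROR.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		return -ENOMEM;
 *
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */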
501 
502 /**
503  * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
504  * @chan: DMA channel to offload copy to
505  * @page: destination page
506  * @offset: offset in page to copy to
507  * @kdata: source address (virtual)
508  * @len: length
509  *
510  * Both @page/@offset and @kdata must be mappable to a bus address according
511  * to the DMA mapping API rules for streaming mappings.
512  * Both @page/@offset and @kdata must stay memory resident (kernel memory or
513  * locked user space pages).
514  */
515 dma_cookie_t
516 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
517 			unsigned int offset, void *kdata, size_t len)
518 {
519 	struct dma_device *dev = chan->device;
520 	struct dma_async_tx_descriptor *tx;
521 	dma_addr_t dma_dest, dma_src;
522 	dma_cookie_t cookie;
523 	int cpu;
524 
525 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
526 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
527 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
528 					 DMA_CTRL_ACK);
529 
530 	if (!tx) {
531 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
532 		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
533 		return -ENOMEM;
534 	}
535 
536 	tx->callback = NULL;
537 	cookie = tx->tx_submit(tx);
538 
539 	cpu = get_cpu();
540 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
541 	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
542 	put_cpu();
543 
544 	return cookie;
545 }
546 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
547 
548 /**
549  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
550  * @chan: DMA channel to offload copy to
551  * @dest_pg: destination page
552  * @dest_off: offset in page to copy to
553  * @src_pg: source page
554  * @src_off: offset in page to copy from
555  * @len: length
556  *
557  * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
558  * address according to the DMA mapping API rules for streaming mappings.
559  * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
560  * (kernel memory or locked user space pages).
561  */
562 dma_cookie_t
563 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
564 	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
565 	size_t len)
566 {
567 	struct dma_device *dev = chan->device;
568 	struct dma_async_tx_descriptor *tx;
569 	dma_addr_t dma_dest, dma_src;
570 	dma_cookie_t cookie;
571 	int cpu;
572 
573 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
574 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
575 				DMA_FROM_DEVICE);
576 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
577 					 DMA_CTRL_ACK);
578 
579 	if (!tx) {
580 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
581 		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
582 		return -ENOMEM;
583 	}
584 
585 	tx->callback = NULL;
586 	cookie = tx->tx_submit(tx);
587 
588 	cpu = get_cpu();
589 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
590 	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
591 	put_cpu();
592 
593 	return cookie;
594 }
595 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
596 
597 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
598 	struct dma_chan *chan)
599 {
600 	tx->chan = chan;
601 	spin_lock_init(&tx->lock);
602 }
603 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
604 
605 static int __init dma_bus_init(void)
606 {
607 	mutex_init(&dma_list_mutex);
608 	return class_register(&dma_devclass);
609 }
610 subsys_initcall(dma_bus_init);
611 
612