xref: /openbmc/linux/drivers/dma/dmaengine.c (revision c21b37f6)
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is set up by the driver beforehand.
 *
 * Each client is responsible for keeping track of the channels it uses.  See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each class_device registered.  When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered.  A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client.  The release function does a kref_put on the device.
 *	-ChrisL, DanW
 */
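
/*
 * Illustrative client-side sketch of the flow described above (hypothetical
 * client code, not part of this file; field and helper names follow the
 * dmaengine.h this file is built against):
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		if (state == DMA_RESOURCE_AVAILABLE)
 *			return DMA_ACK;	(claim; core takes chan + device refs)
 *		if (state == DMA_RESOURCE_REMOVED)
 *			return DMA_ACK;	(core drops those refs for us)
 *		return DMA_NAK;
 *	}
 *
 *	static struct dma_client my_client = { .event_callback = my_event };
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */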

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};
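
/*
 * With the class name "dma" below and the "dma%dchan%d" ids assigned in
 * dma_async_device_register(), these read-only attributes surface as, e.g.
 * (illustrative paths for the first channel of the first device):
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 *
 * The counters are summed across the per-cpu statistics on each read.
 */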

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name            = "dma",
	.class_dev_attrs = dma_class_attrs,
	.release         = dma_class_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
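
/*
 * A channel matches when the client's wanted capabilities are a subset of
 * the device's.  For example (hypothetical masks): want = {MEMCPY} against
 * a device advertising {MEMCPY, XOR} gives has = want & cap = {MEMCPY},
 * which equals want, so the channel satisfies the mask; want =
 * {MEMCPY, XOR} against a MEMCPY-only device does not.
 */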
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK) {
					dma_chan_get(chan);
					kref_get(&device->refcount);
				} else if (ack == DMA_NAK)
					return;
			}
		}
}

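/**
 * dma_sync_wait - busy-wait for an asynchronous DMA transaction to complete
 * @chan: channel carrying the transaction
 * @cookie: cookie returned by the descriptor's ->tx_submit
 *
 * Issues pending operations, then polls completion status for up to five
 * seconds; returns the final status, or DMA_ERROR on timeout.  Illustrative
 * pairing with the memcpy helpers below:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie >= 0 && dma_sync_wait(chan, cookie) == DMA_SUCCESS)
 *		...the copy is complete and dest is valid...
 */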
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

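/*
 * Teardown side of the open-coded "bigref" described at the top of this
 * file: dma_chan_release() biases the central kref by 0x7FFFFFFF so it
 * cannot hit zero while per-cpu references are still being folded in, sets
 * slow_ref so new dma_chan_get/put calls fall back to the kref, and then,
 * after an RCU grace period, dma_chan_free_rcu() subtracts the bias minus
 * the per-cpu counts, transferring any outstanding per-cpu references onto
 * the kref before dropping the channel's base reference.
 */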
static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);

	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* a client that acks was holding resources for this
		 * channel, so free them
		 */
		if (ack == DMA_ACK) {
			dma_chan_put(chan);
			kref_put(&chan->device->refcount,
				dma_async_device_cleanup);
		}
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client
 * @client: &dma_client to unregister
 *
 * Forces the release of any DMA channels the client holds and removes the
 * client from the client list; the caller remains responsible for the
 * &dma_client memory itself.
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK) {
				dma_chan_put(chan);
				kref_put(&chan->device->refcount,
					dma_async_device_cleanup);
			}
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - announce available channels to a client
 * @client: requesting client; only channels satisfying its capability mask
 *	are offered
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_dependency_added);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
		         device->dev_id, chan->chan_id);

		rc = class_device_register(&chan->class_dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		class_device_unregister(&chan->class_dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
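
/*
 * Provider-side sketch matching the checks above (illustrative only;
 * my_dma_dev, my_chan, pdev and the my_* ops are hypothetical driver code):
 *
 *	struct dma_device *dev = &my_dma_dev->common;
 *
 *	dma_cap_set(DMA_MEMCPY, dev->cap_mask);
 *	dev->device_prep_dma_memcpy      = my_prep_memcpy;
 *	dev->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dev->device_free_chan_resources  = my_free_chan_resources;
 *	dev->device_dependency_added     = my_dependency_added;
 *	dev->device_is_tx_complete       = my_is_tx_complete;
 *	dev->device_issue_pending        = my_issue_pending;
 *	dev->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&dev->channels);
 *	list_add_tail(&my_chan->common.device_node, &dev->channels);
 *
 *	return dma_async_device_register(dev);
 */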

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		class_device_unregister(&chan->class_dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

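/*
 * Exported helper for DMA providers: initializes the fields of a
 * &dma_async_tx_descriptor that are common to all drivers (channel back
 * pointer, lock, dependency lists).  A driver would typically call this
 * once for each descriptor it embeds in its own driver-private descriptor
 * structure, before handing descriptors out via its device_prep_dma_* ops.
 */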
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);