/* /openbmc/linux/drivers/iio/buffer/industrialio-buffer-dmaengine.c (revision 2d6ca60f328450ff5c7802d0857d12e3711348ce) */
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and to implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers. Combined,
 * this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals connected
 * to a DMA controller that has a DMAengine driver implementation.
 */

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

/*
 * DMA transfer completion callback. Removes the block from the list of
 * active transfers and hands it back to the core IIO DMA buffer code.
 */
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Limit the transfer to the maximum segment size supported by the
	 * DMA controller and round it down to the required alignment.
	 */
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_all(dmaengine_buffer->chan);
	/* FIXME: There is a slight chance of a race condition here.
	 * dmaengine_terminate_all() does not guarantee that all transfer
	 * callbacks have finished running. Need to introduce a
	 * dmaengine_terminate_all_sync().
	 */
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_slave_channel_reason(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Transfers must be aligned to the larger of the minimum supported
	 * source and destination address widths.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);

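/*
 * Illustrative sketch, not part of this driver: how a peripheral driver
 * might wire the DMAengine buffer into its IIO device from its probe path.
 * The function name, the probe context and the "rx" channel name are
 * assumptions for illustration only; the calls themselves are the regular
 * IIO core and DMAengine buffer APIs used above.
 */
static int __maybe_unused iio_dmaengine_buffer_example_probe(struct device *dev,
	struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	/* Request the "rx" DMA channel via the parent device and set up the buffer */
	buffer = iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* The buffer is filled by hardware, so advertise hardware buffer mode */
	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	return iio_device_register(indio_dev);
}
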
/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
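
/*
 * Illustrative sketch, not part of this driver: the matching teardown in a
 * hypothetical consumer's remove path. The IIO device is unregistered first
 * so no new transfers can be started, then the buffer (and with it the DMA
 * channel) is released.
 */
static void __maybe_unused iio_dmaengine_buffer_example_remove(struct iio_dev *indio_dev)
{
	iio_device_unregister(indio_dev);
	iio_dmaengine_buffer_free(indio_dev->buffer);
}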