/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/export.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and to implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the actual DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by drivers for peripherals connected to a
 * DMA controller that has a DMAengine driver implementation.
 */
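
/*
 * A peripheral driver would typically wire the buffer up along these lines
 * (a rough sketch only; the probe flow and the "rx" channel name below are
 * assumptions, not something this file prescribes):
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *
 * The buffer is released again with iio_dmaengine_buffer_free(), typically
 * from the driver's remove path.
 */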
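/**
 * struct dmaengine_buffer - DMAengine based IIO buffer
 * @queue: Generic IIO DMA buffer queue
 * @chan: DMAengine channel used to perform the transfers
 * @active: List of submitted blocks that have not completed yet
 * @align: Transfer alignment requirement, in bytes
 * @max_size: Maximum size of a single DMA transfer, in bytes
 */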
struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

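/*
 * Transfer completion callback, invoked by the DMAengine driver (typically
 * from interrupt or tasklet context) once the transfer for a block has
 * finished.
 */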
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

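	/*
	 * Clamp the transfer to the largest contiguous segment the DMA
	 * controller supports and round down to the required alignment.
	 */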
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

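	/*
	 * Queue the descriptor; the hardware does not start the transfer
	 * until dma_async_issue_pending() is called below.
	 */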
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_all(dmaengine_buffer->chan);
	/*
	 * FIXME: There is a slight chance of a race condition here.
	 * dmaengine_terminate_all() does not guarantee that all transfer
	 * callbacks have finished running. Need to introduce a
	 * dmaengine_terminate_all_sync().
	 */
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

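/*
 * Most operations are provided directly by the generic IIO DMA buffer
 * infrastructure; only release needs to free the DMAengine specific state
 * on top of it.
 */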
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 *
 * Return: Pointer to the new IIO buffer on success, or an ERR_PTR() on
 * failure.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_slave_channel_reason(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * The reported address widths are bitmasks of the supported bus
	 * widths in bytes, so __ffs() yields the narrowest width the
	 * controller supports on each side (e.g. BIT(2) | BIT(4) yields 2).
	 * Transfers need to be aligned to the larger of the two minimums.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_alloc);

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc() and
 * releases the DMA channel associated with it.
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);