// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure
 * manages the buffer memory and implements the IIO buffer operations, while
 * the DMAengine framework performs the actual DMA transfers. Combined, this
 * results in a device-independent, fully functional DMA buffer implementation
 * that can be used by drivers for any peripheral that is connected to a DMA
 * controller with a DMAengine driver.
 */
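
/*
 * Minimal usage sketch (illustrative only; the "foo_adc" driver below is
 * hypothetical): a peripheral driver typically attaches such a buffer from
 * its probe routine with devm_iio_dmaengine_buffer_setup(), defined further
 * down in this file.
 *
 *	static int foo_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_adc));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		// ... fill in name, channels, info ...
 *
 *		ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev, "rx");
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */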

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

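/*
 * DMA completion callback, invoked once the transfer for a block has finished
 * (or has been terminated). result->residue is the number of bytes that were
 * not transferred, so subtracting it yields the number of bytes actually
 * placed into the block. The block is taken off the active list under
 * list_lock because that list is also manipulated from the abort path.
 */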
static void iio_dmaengine_buffer_block_done(void *data,
	const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

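/*
 * Submit one block to the DMA engine: the transfer size is clamped to the
 * controller's maximum segment size and rounded down to the required
 * alignment, a single device-to-memory slave transfer is prepared with the
 * completion callback attached, and the descriptor is submitted and issued.
 * The block is placed on the active list so it can be reclaimed on abort.
 */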
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = round_down(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

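/*
 * Abort all outstanding transfers: dmaengine_terminate_sync() stops the
 * channel and waits for any in-flight completion callback to finish, after
 * which the blocks still sitting on the active list are handed back to the
 * DMA buffer core.
 */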
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

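/*
 * All buffer operations except .release are delegated to the generic IIO DMA
 * buffer implementation; only the release callback is local so that the
 * dmaengine-specific container is freed together with the queue.
 */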
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

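/*
 * Exposed to userspace as the read-only "length_align_bytes" buffer
 * attribute: it reports the alignment applied to the per-block transfer size
 * so that applications can pick buffer lengths that match the DMA engine's
 * requirements.
 */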
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
	iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * The block size needs to be aligned to the maximum of the minimum
	 * supported bus widths: src/dst_addr_widths are bitmasks of the
	 * supported widths in bytes, so __ffs() picks the narrowest width
	 * supported on each side.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
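
/*
 * Illustrative pairing only (error handling abbreviated, names hypothetical):
 * callers that do not use the devm_ helpers further down are expected to
 * match every allocation with a call to iio_dmaengine_buffer_free().
 *
 *	buffer = iio_dmaengine_buffer_alloc(dev, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	iio_dmaengine_buffer_free(buffer);
 */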

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);

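/*
 * Trampoline taking a void pointer so that iio_dmaengine_buffer_free() can be
 * registered as a devm action.
 */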
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * The buffer is automatically freed once the parent device is unbound from
 * its driver.
 */
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return buffer;

	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
			buffer);
	if (ret)
		return ERR_PTR(ret);

	return buffer;
}

/**
 * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
int devm_iio_dmaengine_buffer_setup(struct device *dev,
		struct iio_dev *indio_dev,
		const char *channel)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
			channel);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	return iio_device_attach_buffer(indio_dev, buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");