// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller with a DMAengine driver implementation.
 */

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

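/*
 * DMA completion callback, typically invoked by the DMAengine driver from
 * tasklet (softirq) context; the irqsave locking below keeps the unlink of
 * the block from the active list safe regardless of the calling context
 * before the block is handed back to the IIO DMA buffer core.
 */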
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

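	/*
	 * Clamp the transfer to the largest segment the DMA device supports
	 * and round it down to the required transfer-length alignment.
	 */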
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

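/*
 * Report the transfer-length alignment to userspace as the
 * "length_align_bytes" sysfs attribute. Applications should pick buffer
 * lengths that are a multiple of this value, since
 * iio_dmaengine_buffer_submit_block() rounds the bytes used per block down
 * to this alignment.
 */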
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);

	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
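 *
 * A minimal usage sketch (illustrative only; "dma_dev" and "indio_dev" are
 * placeholders for the calling driver's DMA parent device and IIO device):
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_dmaengine_buffer_alloc(dma_dev, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);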
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Needs to be aligned to the maximum of the minimums: the caps
	 * fields are bitmasks of the transfer widths (in bytes) supported
	 * by the channel, so __ffs() yields the smallest width each side
	 * supports, and we align to the larger of the two.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);
	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
		iio_dmaengine_buffer_attrs);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");