/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by drivers for peripherals connected to a
 * DMA controller that has a DMAengine driver.
 */
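
/*
 * Typical use from a peripheral driver, as a minimal sketch (foo_probe(),
 * the indio_dev variable and the "rx" channel name are illustrative
 * assumptions, not part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_buffer *buffer;
 *		...
 *		buffer = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
 *		if (IS_ERR(buffer))
 *			return PTR_ERR(buffer);
 *
 *		indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *		iio_device_attach_buffer(indio_dev, buffer);
 *		...
 *		return 0;
 *	}
 *
 * On the remove path, iio_dmaengine_buffer_free() releases the buffer again.
 */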

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

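/*
 * DMA completion callback: removes the block from the list of in-flight
 * blocks and hands it back to the IIO DMA buffer core. DMA drivers may
 * invoke this from different contexts, hence the irqsave locking.
 */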
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

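	/*
	 * Clamp the transfer to the largest segment the DMA controller can
	 * handle and round down to the required address-width alignment.
	 */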
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

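	/*
	 * Track the block as in-flight; the completion callback removes it
	 * from this list again.
	 */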
	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

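/*
 * Cancel all in-flight transfers and hand the queued blocks back to the
 * core. dmaengine_terminate_sync() also waits for any running completion
 * callback to finish before returning.
 */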
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

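/*
 * Called once the last reference to the buffer has been dropped; frees
 * both the DMA queue resources and the buffer structure itself.
 */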
static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done with the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_slave_channel_reason(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Transfers need to be aligned to the maximum of the minimum
	 * supported source and destination address widths. __ffs() on the
	 * capability bitmask yields the smallest supported width in bytes.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
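	/* A single transfer must not exceed the controller's max segment size. */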
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
209