xref: /openbmc/linux/drivers/dma/sh/shdma-base.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement to the
 * "old" method, using the .private pointer.
 * You always have to pass a valid slave ID as the argument. Old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
 * need to be updated so we can remove the slave_id field from dma_slave_config.
 * If this filter is used, the slave driver, after calling
 * dma_request_channel(), will also have to call dmaengine_slave_config() with
 * .direction, and either .src_addr or .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using this
 * service, would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver.
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
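
/*
 * Illustrative sketch, not part of the original driver: how a non-DT slave
 * driver might request a channel through shdma_chan_filter(). The function
 * name and the slave_id argument are assumptions made for this example; the
 * slave ID is whatever DMA request line the client hardware uses.
 */
static __maybe_unused struct dma_chan *example_request_shdma_chan(int slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The slave ID reaches the filter via the opaque "arg" parameter */
	return dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)slave_id);
}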

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}
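
/*
 * Illustrative sketch, not part of the original driver: a one-shot MEMCPY on a
 * channel served by this library, which ends up in shdma_prep_memcpy() above.
 * The function name is made up; src/dst are assumed to be DMA-mapped already.
 */
static int __maybe_unused example_memcpy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	/* shdma_tx_submit() queues the chunks, shdma_issue_pending() starts them */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}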

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}
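
/*
 * Illustrative sketch, not part of the original driver: the sequence a slave
 * driver typically follows to run one SG transfer on a channel backed by this
 * library. The function name and callback arguments are assumptions.
 */
static int __maybe_unused example_slave_sg_xfer(struct dma_chan *chan,
		struct scatterlist *sgl, unsigned int nents,
		enum dma_transfer_direction dir,
		dma_async_tx_callback done, void *done_arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* This lands in shdma_prep_slave_sg() above */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;
	desc->callback_param = done_arg;

	/* shdma_tx_submit() marks the chunks DESC_SUBMITTED and assigns a cookie */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* shdma_issue_pending() kicks the queue if the channel is idle */
	dma_async_issue_pending(chan);

	return 0;
}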

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
				sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			__func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}
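
/*
 * Illustrative sketch, not part of the original driver: how a client (e.g. an
 * audio driver) might start a cyclic transfer that is prepared by
 * shdma_prep_dma_cyclic() above. The function name and buffer layout
 * parameters are assumptions made for this example.
 */
static int __maybe_unused example_start_cyclic(struct dma_chan *chan,
		dma_addr_t buf, size_t buf_len, size_t period_len,
		dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	/* buf_len / period_len periods; completed descriptors are re-queued */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Called once per completed period from __ld_cleanup() */
	desc->callback = period_done;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}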

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * The slave drivers are expected to set a transfer direction and
	 * either a source or a destination address, matching that direction.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}
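
/*
 * Illustrative sketch, not part of the original driver: the minimal
 * dma_slave_config a client passes before using a slave channel; it ends up
 * in shdma_config() above. The function name and the FIFO address argument
 * are assumptions made for this example.
 */
static int __maybe_unused example_configure_rx(struct dma_chan *chan,
					       dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
	};

	/* shdma_config() picks .src_addr because .direction is DMA_DEV_TO_MEM */
	return dmaengine_slave_config(chan, &cfg);
}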

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report error
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

chan_irq(int irq,void * dev)8939a7b8e00SGuennadi Liakhovetski static irqreturn_t chan_irq(int irq, void *dev)
8949a7b8e00SGuennadi Liakhovetski {
8959a7b8e00SGuennadi Liakhovetski 	struct shdma_chan *schan = dev;
8969a7b8e00SGuennadi Liakhovetski 	const struct shdma_ops *ops =
8979a7b8e00SGuennadi Liakhovetski 		to_shdma_dev(schan->dma_chan.device)->ops;
8989a7b8e00SGuennadi Liakhovetski 	irqreturn_t ret;
8999a7b8e00SGuennadi Liakhovetski 
9009a7b8e00SGuennadi Liakhovetski 	spin_lock(&schan->chan_lock);
9019a7b8e00SGuennadi Liakhovetski 
9029a7b8e00SGuennadi Liakhovetski 	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
9039a7b8e00SGuennadi Liakhovetski 
9049a7b8e00SGuennadi Liakhovetski 	spin_unlock(&schan->chan_lock);
9059a7b8e00SGuennadi Liakhovetski 
9069a7b8e00SGuennadi Liakhovetski 	return ret;
9079a7b8e00SGuennadi Liakhovetski }
9089a7b8e00SGuennadi Liakhovetski 
chan_irqt(int irq,void * dev)9099a7b8e00SGuennadi Liakhovetski static irqreturn_t chan_irqt(int irq, void *dev)
9109a7b8e00SGuennadi Liakhovetski {
9119a7b8e00SGuennadi Liakhovetski 	struct shdma_chan *schan = dev;
9129a7b8e00SGuennadi Liakhovetski 	const struct shdma_ops *ops =
9139a7b8e00SGuennadi Liakhovetski 		to_shdma_dev(schan->dma_chan.device)->ops;
9149a7b8e00SGuennadi Liakhovetski 	struct shdma_desc *sdesc;
9159a7b8e00SGuennadi Liakhovetski 
9169a7b8e00SGuennadi Liakhovetski 	spin_lock_irq(&schan->chan_lock);
9179a7b8e00SGuennadi Liakhovetski 	list_for_each_entry(sdesc, &schan->ld_queue, node) {
9189a7b8e00SGuennadi Liakhovetski 		if (sdesc->mark == DESC_SUBMITTED &&
9199a7b8e00SGuennadi Liakhovetski 		    ops->desc_completed(schan, sdesc)) {
9209a7b8e00SGuennadi Liakhovetski 			dev_dbg(schan->dev, "done #%d@%p\n",
9219a7b8e00SGuennadi Liakhovetski 				sdesc->async_tx.cookie, &sdesc->async_tx);
9229a7b8e00SGuennadi Liakhovetski 			sdesc->mark = DESC_COMPLETED;
9239a7b8e00SGuennadi Liakhovetski 			break;
9249a7b8e00SGuennadi Liakhovetski 		}
9259a7b8e00SGuennadi Liakhovetski 	}
9269a7b8e00SGuennadi Liakhovetski 	/* Start the next queued descriptor, if any */
9279a7b8e00SGuennadi Liakhovetski 	shdma_chan_xfer_ld_queue(schan);
9289a7b8e00SGuennadi Liakhovetski 	spin_unlock_irq(&schan->chan_lock);
9299a7b8e00SGuennadi Liakhovetski 
9309a7b8e00SGuennadi Liakhovetski 	shdma_chan_ld_cleanup(schan, false);
9319a7b8e00SGuennadi Liakhovetski 
9329a7b8e00SGuennadi Liakhovetski 	return IRQ_HANDLED;
9339a7b8e00SGuennadi Liakhovetski }
9349a7b8e00SGuennadi Liakhovetski 
9359a7b8e00SGuennadi Liakhovetski int shdma_request_irq(struct shdma_chan *schan, int irq,
9369a7b8e00SGuennadi Liakhovetski 			   unsigned long flags, const char *name)
9379a7b8e00SGuennadi Liakhovetski {
938c1c63a14SGuennadi Liakhovetski 	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
939c1c63a14SGuennadi Liakhovetski 					    chan_irqt, flags, name, schan);
9409a7b8e00SGuennadi Liakhovetski 
9419a7b8e00SGuennadi Liakhovetski 	schan->irq = ret < 0 ? ret : irq;
9429a7b8e00SGuennadi Liakhovetski 
9439a7b8e00SGuennadi Liakhovetski 	return ret;
9449a7b8e00SGuennadi Liakhovetski }
9459a7b8e00SGuennadi Liakhovetski EXPORT_SYMBOL(shdma_request_irq);
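/*
 * Editor's note: a hedged usage sketch for shdma_request_irq(), not
 * taken from this file.  A glue driver's per-channel setup would
 * typically look up the interrupt in its resources and pass it here;
 * on success the number is remembered in schan->irq.  The "my-dma"
 * name and the pdev/id variables are illustrative only:
 *
 *	int irq = platform_get_irq(pdev, id);
 *
 *	if (irq < 0)
 *		return irq;
 *	err = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma");
 *	if (err < 0)
 *		return err;
 */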
9469a7b8e00SGuennadi Liakhovetski 
9479a7b8e00SGuennadi Liakhovetski void shdma_chan_probe(struct shdma_dev *sdev,
9489a7b8e00SGuennadi Liakhovetski 			   struct shdma_chan *schan, int id)
9499a7b8e00SGuennadi Liakhovetski {
9509a7b8e00SGuennadi Liakhovetski 	schan->pm_state = SHDMA_PM_ESTABLISHED;
9519a7b8e00SGuennadi Liakhovetski 
9529a7b8e00SGuennadi Liakhovetski 	/* reference struct dma_device */
9539a7b8e00SGuennadi Liakhovetski 	schan->dma_chan.device = &sdev->dma_dev;
9549a7b8e00SGuennadi Liakhovetski 	dma_cookie_init(&schan->dma_chan);
9559a7b8e00SGuennadi Liakhovetski 
9569a7b8e00SGuennadi Liakhovetski 	schan->dev = sdev->dma_dev.dev;
9579a7b8e00SGuennadi Liakhovetski 	schan->id = id;
9589a7b8e00SGuennadi Liakhovetski 
9599a7b8e00SGuennadi Liakhovetski 	if (!schan->max_xfer_len)
9609a7b8e00SGuennadi Liakhovetski 		schan->max_xfer_len = PAGE_SIZE;
9619a7b8e00SGuennadi Liakhovetski 
9629a7b8e00SGuennadi Liakhovetski 	spin_lock_init(&schan->chan_lock);
9639a7b8e00SGuennadi Liakhovetski 
9649a7b8e00SGuennadi Liakhovetski 	/* Initialize the descriptor management lists */
9659a7b8e00SGuennadi Liakhovetski 	INIT_LIST_HEAD(&schan->ld_queue);
9669a7b8e00SGuennadi Liakhovetski 	INIT_LIST_HEAD(&schan->ld_free);
9679a7b8e00SGuennadi Liakhovetski 
9689a7b8e00SGuennadi Liakhovetski 	/* Add the channel to the DMA device's channel list */
9699a7b8e00SGuennadi Liakhovetski 	list_add_tail(&schan->dma_chan.device_node,
9709a7b8e00SGuennadi Liakhovetski 			&sdev->dma_dev.channels);
9711e916474SMaxime Ripard 	sdev->schan[id] = schan;
9729a7b8e00SGuennadi Liakhovetski }
9739a7b8e00SGuennadi Liakhovetski EXPORT_SYMBOL(shdma_chan_probe);
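/*
 * Editor's note: shdma_chan_probe() falls back to PAGE_SIZE only when
 * max_xfer_len is still zero, so a glue driver whose hardware supports
 * larger transfers should set the limit before calling it.  A sketch of
 * a hypothetical per-channel setup (my_chan and MY_MAX_XFER are made-up
 * names):
 *
 *	struct my_chan *mc = devm_kzalloc(sdev->dma_dev.dev,
 *					  sizeof(*mc), GFP_KERNEL);
 *
 *	if (!mc)
 *		return -ENOMEM;
 *	mc->shdma_chan.max_xfer_len = MY_MAX_XFER;
 *	shdma_chan_probe(sdev, &mc->shdma_chan, id);
 */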
9749a7b8e00SGuennadi Liakhovetski 
9759a7b8e00SGuennadi Liakhovetski void shdma_chan_remove(struct shdma_chan *schan)
9769a7b8e00SGuennadi Liakhovetski {
9779a7b8e00SGuennadi Liakhovetski 	list_del(&schan->dma_chan.device_node);
9789a7b8e00SGuennadi Liakhovetski }
9799a7b8e00SGuennadi Liakhovetski EXPORT_SYMBOL(shdma_chan_remove);
9809a7b8e00SGuennadi Liakhovetski 
9819a7b8e00SGuennadi Liakhovetski int shdma_init(struct device *dev, struct shdma_dev *sdev,
9829a7b8e00SGuennadi Liakhovetski 		    int chan_num)
9839a7b8e00SGuennadi Liakhovetski {
9849a7b8e00SGuennadi Liakhovetski 	struct dma_device *dma_dev = &sdev->dma_dev;
9859a7b8e00SGuennadi Liakhovetski 
9869a7b8e00SGuennadi Liakhovetski 	/*
9879a7b8e00SGuennadi Liakhovetski 	 * Require all callbacks for now; they can trivially be made
9889a7b8e00SGuennadi Liakhovetski 	 * optional later if needed.
9899a7b8e00SGuennadi Liakhovetski 	 */
9909a7b8e00SGuennadi Liakhovetski 	if (!sdev->ops ||
9919a7b8e00SGuennadi Liakhovetski 	    !sdev->desc_size ||
9929a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->embedded_desc ||
9939a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->start_xfer ||
9949a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->setup_xfer ||
9959a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->set_slave ||
9969a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->desc_setup ||
9979a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->slave_addr ||
9989a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->channel_busy ||
9999a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->halt_channel ||
10009a7b8e00SGuennadi Liakhovetski 	    !sdev->ops->desc_completed)
10019a7b8e00SGuennadi Liakhovetski 		return -EINVAL;
10029a7b8e00SGuennadi Liakhovetski 
10039a7b8e00SGuennadi Liakhovetski 	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
10049a7b8e00SGuennadi Liakhovetski 	if (!sdev->schan)
10059a7b8e00SGuennadi Liakhovetski 		return -ENOMEM;
10069a7b8e00SGuennadi Liakhovetski 
10079a7b8e00SGuennadi Liakhovetski 	INIT_LIST_HEAD(&dma_dev->channels);
10089a7b8e00SGuennadi Liakhovetski 
10099a7b8e00SGuennadi Liakhovetski 	/* Common and MEMCPY operations */
10109a7b8e00SGuennadi Liakhovetski 	dma_dev->device_alloc_chan_resources
10119a7b8e00SGuennadi Liakhovetski 		= shdma_alloc_chan_resources;
10129a7b8e00SGuennadi Liakhovetski 	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
10139a7b8e00SGuennadi Liakhovetski 	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
10149a7b8e00SGuennadi Liakhovetski 	dma_dev->device_tx_status = shdma_tx_status;
10159a7b8e00SGuennadi Liakhovetski 	dma_dev->device_issue_pending = shdma_issue_pending;
10169a7b8e00SGuennadi Liakhovetski 
10179a7b8e00SGuennadi Liakhovetski 	/* Fields compulsory for DMA_SLAVE */
10189a7b8e00SGuennadi Liakhovetski 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
1019dfbb85caSKuninori Morimoto 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
1020be60f940SMaxime Ripard 	dma_dev->device_config = shdma_config;
1021be60f940SMaxime Ripard 	dma_dev->device_terminate_all = shdma_terminate_all;
10229a7b8e00SGuennadi Liakhovetski 
10239a7b8e00SGuennadi Liakhovetski 	dma_dev->dev = dev;
10249a7b8e00SGuennadi Liakhovetski 
10259a7b8e00SGuennadi Liakhovetski 	return 0;
10269a7b8e00SGuennadi Liakhovetski }
10279a7b8e00SGuennadi Liakhovetski EXPORT_SYMBOL(shdma_init);
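/*
 * Editor's note: shdma_init() only validates the ops, allocates the
 * channel array and wires up the generic dmaengine callbacks; it sets
 * no capability bits and does not register the device.  A glue driver's
 * probe is therefore expected to do roughly the following (sketch only,
 * error handling omitted; my_shdma_ops, my_desc and MY_NR_CHANNELS are
 * illustrative names):
 *
 *	sdev->ops = &my_shdma_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
 *	dma_cap_set(DMA_SLAVE, sdev->dma_dev.cap_mask);
 *	err = shdma_init(&pdev->dev, sdev, MY_NR_CHANNELS);
 *	if (err < 0)
 *		return err;
 *	... per-channel shdma_chan_probe()/shdma_request_irq() calls ...
 *	err = dma_async_device_register(&sdev->dma_dev);
 */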
10289a7b8e00SGuennadi Liakhovetski 
10299a7b8e00SGuennadi Liakhovetski void shdma_cleanup(struct shdma_dev *sdev)
10309a7b8e00SGuennadi Liakhovetski {
10319a7b8e00SGuennadi Liakhovetski 	kfree(sdev->schan);
10329a7b8e00SGuennadi Liakhovetski }
10339a7b8e00SGuennadi Liakhovetski EXPORT_SYMBOL(shdma_cleanup);
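/*
 * Editor's note: shdma_cleanup() only frees the channel pointer array
 * allocated by shdma_init().  Unregistering the dma_device and tearing
 * down the individual channels presumably remains the glue driver's
 * responsibility in its remove path.
 */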
10349a7b8e00SGuennadi Liakhovetski 
10359a7b8e00SGuennadi Liakhovetski static int __init shdma_enter(void)
10369a7b8e00SGuennadi Liakhovetski {
1037d5aeba45SChristophe JAILLET 	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
10389a7b8e00SGuennadi Liakhovetski 	if (!shdma_slave_used)
10399a7b8e00SGuennadi Liakhovetski 		return -ENOMEM;
10409a7b8e00SGuennadi Liakhovetski 	return 0;
10419a7b8e00SGuennadi Liakhovetski }
10429a7b8e00SGuennadi Liakhovetski module_init(shdma_enter);
10439a7b8e00SGuennadi Liakhovetski 
10449a7b8e00SGuennadi Liakhovetski static void __exit shdma_exit(void)
10459a7b8e00SGuennadi Liakhovetski {
1046d5aeba45SChristophe JAILLET 	bitmap_free(shdma_slave_used);
10479a7b8e00SGuennadi Liakhovetski }
10489a7b8e00SGuennadi Liakhovetski module_exit(shdma_exit);
10499a7b8e00SGuennadi Liakhovetski 
10509a7b8e00SGuennadi Liakhovetski MODULE_DESCRIPTION("SH-DMA driver base library");
10519a7b8e00SGuennadi Liakhovetski MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1052