xref: /openbmc/linux/drivers/dma/pl330.c (revision 9e5ed094)
1b3040e40SJassi Brar /* linux/drivers/dma/pl330.c
2b3040e40SJassi Brar  *
3b3040e40SJassi Brar  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4b3040e40SJassi Brar  *	Jaswinder Singh <jassi.brar@samsung.com>
5b3040e40SJassi Brar  *
6b3040e40SJassi Brar  * This program is free software; you can redistribute it and/or modify
7b3040e40SJassi Brar  * it under the terms of the GNU General Public License as published by
8b3040e40SJassi Brar  * the Free Software Foundation; either version 2 of the License, or
9b3040e40SJassi Brar  * (at your option) any later version.
10b3040e40SJassi Brar  */
11b3040e40SJassi Brar 
12b3040e40SJassi Brar #include <linux/io.h>
13b3040e40SJassi Brar #include <linux/init.h>
14b3040e40SJassi Brar #include <linux/slab.h>
15b3040e40SJassi Brar #include <linux/module.h>
16b3040e40SJassi Brar #include <linux/dmaengine.h>
17b3040e40SJassi Brar #include <linux/interrupt.h>
18b3040e40SJassi Brar #include <linux/amba/bus.h>
19b3040e40SJassi Brar #include <linux/amba/pl330.h>
20a2f5203fSBoojin Kim #include <linux/pm_runtime.h>
211b9bb715SBoojin Kim #include <linux/scatterlist.h>
2293ed5544SThomas Abraham #include <linux/of.h>
23b3040e40SJassi Brar 
24b3040e40SJassi Brar #define NR_DEFAULT_DESC	16
25b3040e40SJassi Brar 
26b3040e40SJassi Brar enum desc_status {
27b3040e40SJassi Brar 	/* In the DMAC pool */
28b3040e40SJassi Brar 	FREE,
29b3040e40SJassi Brar 	/*
30b3040e40SJassi Brar 	 * Allocated to some channel during prep_xxx.
31b3040e40SJassi Brar 	 * May also be sitting on the work_list.
32b3040e40SJassi Brar 	 */
33b3040e40SJassi Brar 	PREP,
34b3040e40SJassi Brar 	/*
35b3040e40SJassi Brar 	 * Sitting on the work_list and already submitted
36b3040e40SJassi Brar 	 * to the PL330 core. Not more than two descriptors
37b3040e40SJassi Brar 	 * of a channel can be BUSY at any time.
38b3040e40SJassi Brar 	 */
39b3040e40SJassi Brar 	BUSY,
40b3040e40SJassi Brar 	/*
41b3040e40SJassi Brar 	 * Sitting on the channel work_list but xfer done
42b3040e40SJassi Brar 	 * by PL330 core
43b3040e40SJassi Brar 	 */
44b3040e40SJassi Brar 	DONE,
45b3040e40SJassi Brar };
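
/*
 * Descriptor lifecycle, as implemented below: descriptors start FREE in the
 * DMAC's desc_pool, become PREP when pluck_desc()/prep_xxx fill them in,
 * turn BUSY once fill_queue() hands them to the PL330 core, are marked DONE
 * by the dma_pl330_rqcb() callback, and are finally returned to the pool
 * (or, for cyclic channels, re-queued as PREP) by pl330_tasklet().
 */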
46b3040e40SJassi Brar 
47b3040e40SJassi Brar struct dma_pl330_chan {
48b3040e40SJassi Brar 	/* Schedule desc completion */
49b3040e40SJassi Brar 	struct tasklet_struct task;
50b3040e40SJassi Brar 
51b3040e40SJassi Brar 	/* DMA-Engine Channel */
52b3040e40SJassi Brar 	struct dma_chan chan;
53b3040e40SJassi Brar 
54b3040e40SJassi Brar 	/* Last completed cookie */
55b3040e40SJassi Brar 	dma_cookie_t completed;
56b3040e40SJassi Brar 
57b3040e40SJassi Brar 	/* List of to be xfered descriptors */
58b3040e40SJassi Brar 	struct list_head work_list;
59b3040e40SJassi Brar 
60b3040e40SJassi Brar 	/* Pointer to the DMAC that manages this channel,
61b3040e40SJassi Brar 	 * NULL if the channel is available to be acquired.
62b3040e40SJassi Brar 	 * As the parent, this DMAC also provides descriptors
63b3040e40SJassi Brar 	 * to the channel.
64b3040e40SJassi Brar 	 */
65b3040e40SJassi Brar 	struct dma_pl330_dmac *dmac;
66b3040e40SJassi Brar 
67b3040e40SJassi Brar 	/* To protect channel manipulation */
68b3040e40SJassi Brar 	spinlock_t lock;
69b3040e40SJassi Brar 
70b3040e40SJassi Brar 	/* Token of a hardware channel thread of PL330 DMAC
71b3040e40SJassi Brar 	 * NULL if the channel is available to be acquired.
72b3040e40SJassi Brar 	 */
73b3040e40SJassi Brar 	void *pl330_chid;
741b9bb715SBoojin Kim 
751b9bb715SBoojin Kim 	/* For D-to-M and M-to-D channels */
761b9bb715SBoojin Kim 	int burst_sz; /* log2 of the peripheral fifo width in bytes */
771d0c1d60SBoojin Kim 	int burst_len; /* the number of bursts */
781b9bb715SBoojin Kim 	dma_addr_t fifo_addr;
7942bc9cf4SBoojin Kim 
8042bc9cf4SBoojin Kim 	/* for cyclic capability */
8142bc9cf4SBoojin Kim 	bool cyclic;
82b3040e40SJassi Brar };
83b3040e40SJassi Brar 
84b3040e40SJassi Brar struct dma_pl330_dmac {
85b3040e40SJassi Brar 	struct pl330_info pif;
86b3040e40SJassi Brar 
87b3040e40SJassi Brar 	/* DMA-Engine Device */
88b3040e40SJassi Brar 	struct dma_device ddma;
89b3040e40SJassi Brar 
90b3040e40SJassi Brar 	/* Pool of descriptors available for the DMAC's channels */
91b3040e40SJassi Brar 	struct list_head desc_pool;
92b3040e40SJassi Brar 	/* To protect desc_pool manipulation */
93b3040e40SJassi Brar 	spinlock_t pool_lock;
94b3040e40SJassi Brar 
95b3040e40SJassi Brar 	/* Peripheral channels connected to this DMAC */
964e0e6109SRob Herring 	struct dma_pl330_chan *peripherals; /* keep at end */
97a2f5203fSBoojin Kim 
98a2f5203fSBoojin Kim 	struct clk *clk;
99b3040e40SJassi Brar };
100b3040e40SJassi Brar 
101b3040e40SJassi Brar struct dma_pl330_desc {
102b3040e40SJassi Brar 	/* To attach to a queue as child */
103b3040e40SJassi Brar 	struct list_head node;
104b3040e40SJassi Brar 
105b3040e40SJassi Brar 	/* Descriptor for the DMA Engine API */
106b3040e40SJassi Brar 	struct dma_async_tx_descriptor txd;
107b3040e40SJassi Brar 
108b3040e40SJassi Brar 	/* Xfer for PL330 core */
109b3040e40SJassi Brar 	struct pl330_xfer px;
110b3040e40SJassi Brar 
111b3040e40SJassi Brar 	struct pl330_reqcfg rqcfg;
112b3040e40SJassi Brar 	struct pl330_req req;
113b3040e40SJassi Brar 
114b3040e40SJassi Brar 	enum desc_status status;
115b3040e40SJassi Brar 
116b3040e40SJassi Brar 	/* The channel which currently holds this desc */
117b3040e40SJassi Brar 	struct dma_pl330_chan *pchan;
118b3040e40SJassi Brar };
119b3040e40SJassi Brar 
1203e2ec13aSThomas Abraham /* forward declaration */
1213e2ec13aSThomas Abraham static struct amba_driver pl330_driver;
1223e2ec13aSThomas Abraham 
123b3040e40SJassi Brar static inline struct dma_pl330_chan *
124b3040e40SJassi Brar to_pchan(struct dma_chan *ch)
125b3040e40SJassi Brar {
126b3040e40SJassi Brar 	if (!ch)
127b3040e40SJassi Brar 		return NULL;
128b3040e40SJassi Brar 
129b3040e40SJassi Brar 	return container_of(ch, struct dma_pl330_chan, chan);
130b3040e40SJassi Brar }
131b3040e40SJassi Brar 
132b3040e40SJassi Brar static inline struct dma_pl330_desc *
133b3040e40SJassi Brar to_desc(struct dma_async_tx_descriptor *tx)
134b3040e40SJassi Brar {
135b3040e40SJassi Brar 	return container_of(tx, struct dma_pl330_desc, txd);
136b3040e40SJassi Brar }
137b3040e40SJassi Brar 
138b3040e40SJassi Brar static inline void free_desc_list(struct list_head *list)
139b3040e40SJassi Brar {
140b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac;
141b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
142b3040e40SJassi Brar 	struct dma_pl330_chan *pch;
143b3040e40SJassi Brar 	unsigned long flags;
144b3040e40SJassi Brar 
145b3040e40SJassi Brar 	if (list_empty(list))
146b3040e40SJassi Brar 		return;
147b3040e40SJassi Brar 
148b3040e40SJassi Brar 	/* Finish off the work list */
149b3040e40SJassi Brar 	list_for_each_entry(desc, list, node) {
150b3040e40SJassi Brar 		dma_async_tx_callback callback;
151b3040e40SJassi Brar 		void *param;
152b3040e40SJassi Brar 
153b3040e40SJassi Brar 		/* All descs in a list belong to the same channel */
154b3040e40SJassi Brar 		pch = desc->pchan;
155b3040e40SJassi Brar 		callback = desc->txd.callback;
156b3040e40SJassi Brar 		param = desc->txd.callback_param;
157b3040e40SJassi Brar 
158b3040e40SJassi Brar 		if (callback)
159b3040e40SJassi Brar 			callback(param);
160b3040e40SJassi Brar 
161b3040e40SJassi Brar 		desc->pchan = NULL;
162b3040e40SJassi Brar 	}
163b3040e40SJassi Brar 
164b3040e40SJassi Brar 	pdmac = pch->dmac;
165b3040e40SJassi Brar 
166b3040e40SJassi Brar 	spin_lock_irqsave(&pdmac->pool_lock, flags);
167b3040e40SJassi Brar 	list_splice_tail_init(list, &pdmac->desc_pool);
168b3040e40SJassi Brar 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
169b3040e40SJassi Brar }
170b3040e40SJassi Brar 
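/*
 * For a cyclic channel the completed descriptors are not returned to the
 * pool: their status is reset to PREP and they are spliced back onto the
 * channel's work_list after the client callback runs, so that fill_queue()
 * resubmits the same period on the next run of the tasklet.
 */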
17142bc9cf4SBoojin Kim static inline void handle_cyclic_desc_list(struct list_head *list)
17242bc9cf4SBoojin Kim {
17342bc9cf4SBoojin Kim 	struct dma_pl330_desc *desc;
17442bc9cf4SBoojin Kim 	struct dma_pl330_chan *pch;
17542bc9cf4SBoojin Kim 	unsigned long flags;
17642bc9cf4SBoojin Kim 
17742bc9cf4SBoojin Kim 	if (list_empty(list))
17842bc9cf4SBoojin Kim 		return;
17942bc9cf4SBoojin Kim 
18042bc9cf4SBoojin Kim 	list_for_each_entry(desc, list, node) {
18142bc9cf4SBoojin Kim 		dma_async_tx_callback callback;
18242bc9cf4SBoojin Kim 
18342bc9cf4SBoojin Kim 		/* Change status to reload it */
18442bc9cf4SBoojin Kim 		desc->status = PREP;
18542bc9cf4SBoojin Kim 		pch = desc->pchan;
18642bc9cf4SBoojin Kim 		callback = desc->txd.callback;
18742bc9cf4SBoojin Kim 		if (callback)
18842bc9cf4SBoojin Kim 			callback(desc->txd.callback_param);
18942bc9cf4SBoojin Kim 	}
19042bc9cf4SBoojin Kim 
19142bc9cf4SBoojin Kim 	spin_lock_irqsave(&pch->lock, flags);
19242bc9cf4SBoojin Kim 	list_splice_tail_init(list, &pch->work_list);
19342bc9cf4SBoojin Kim 	spin_unlock_irqrestore(&pch->lock, flags);
19442bc9cf4SBoojin Kim }
19542bc9cf4SBoojin Kim 
196b3040e40SJassi Brar static inline void fill_queue(struct dma_pl330_chan *pch)
197b3040e40SJassi Brar {
198b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
199b3040e40SJassi Brar 	int ret;
200b3040e40SJassi Brar 
201b3040e40SJassi Brar 	list_for_each_entry(desc, &pch->work_list, node) {
202b3040e40SJassi Brar 
203b3040e40SJassi Brar 		/* If already submitted */
204b3040e40SJassi Brar 		if (desc->status == BUSY)
205b3040e40SJassi Brar 			break;
206b3040e40SJassi Brar 
207b3040e40SJassi Brar 		ret = pl330_submit_req(pch->pl330_chid,
208b3040e40SJassi Brar 						&desc->req);
209b3040e40SJassi Brar 		if (!ret) {
210b3040e40SJassi Brar 			desc->status = BUSY;
211b3040e40SJassi Brar 			break;
212b3040e40SJassi Brar 		} else if (ret == -EAGAIN) {
213b3040e40SJassi Brar 			/* QFull or DMAC Dying */
214b3040e40SJassi Brar 			break;
215b3040e40SJassi Brar 		} else {
216b3040e40SJassi Brar 			/* Unacceptable request */
217b3040e40SJassi Brar 			desc->status = DONE;
218b3040e40SJassi Brar 			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
219b3040e40SJassi Brar 					__func__, __LINE__, desc->txd.cookie);
220b3040e40SJassi Brar 			tasklet_schedule(&pch->task);
221b3040e40SJassi Brar 		}
222b3040e40SJassi Brar 	}
223b3040e40SJassi Brar }
224b3040e40SJassi Brar 
225b3040e40SJassi Brar static void pl330_tasklet(unsigned long data)
226b3040e40SJassi Brar {
227b3040e40SJassi Brar 	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
228b3040e40SJassi Brar 	struct dma_pl330_desc *desc, *_dt;
229b3040e40SJassi Brar 	unsigned long flags;
230b3040e40SJassi Brar 	LIST_HEAD(list);
231b3040e40SJassi Brar 
232b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
233b3040e40SJassi Brar 
234b3040e40SJassi Brar 	/* Pick up ripe tomatoes, i.e. descriptors the core has marked DONE */
235b3040e40SJassi Brar 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
236b3040e40SJassi Brar 		if (desc->status == DONE) {
237b3040e40SJassi Brar 			pch->completed = desc->txd.cookie;
238b3040e40SJassi Brar 			list_move_tail(&desc->node, &list);
239b3040e40SJassi Brar 		}
240b3040e40SJassi Brar 
241b3040e40SJassi Brar 	/* Try to submit a req imm. next to the last completed cookie */
242b3040e40SJassi Brar 	fill_queue(pch);
243b3040e40SJassi Brar 
244b3040e40SJassi Brar 	/* Make sure the PL330 Channel thread is active */
245b3040e40SJassi Brar 	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
246b3040e40SJassi Brar 
247b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
248b3040e40SJassi Brar 
24942bc9cf4SBoojin Kim 	if (pch->cyclic)
25042bc9cf4SBoojin Kim 		handle_cyclic_desc_list(&list);
25142bc9cf4SBoojin Kim 	else
252b3040e40SJassi Brar 		free_desc_list(&list);
253b3040e40SJassi Brar }
254b3040e40SJassi Brar 
255b3040e40SJassi Brar static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
256b3040e40SJassi Brar {
257b3040e40SJassi Brar 	struct dma_pl330_desc *desc = token;
258b3040e40SJassi Brar 	struct dma_pl330_chan *pch = desc->pchan;
259b3040e40SJassi Brar 	unsigned long flags;
260b3040e40SJassi Brar 
261b3040e40SJassi Brar 	/* If desc aborted */
262b3040e40SJassi Brar 	if (!pch)
263b3040e40SJassi Brar 		return;
264b3040e40SJassi Brar 
265b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
266b3040e40SJassi Brar 
267b3040e40SJassi Brar 	desc->status = DONE;
268b3040e40SJassi Brar 
269b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
270b3040e40SJassi Brar 
271b3040e40SJassi Brar 	tasklet_schedule(&pch->task);
272b3040e40SJassi Brar }
273b3040e40SJassi Brar 
2743e2ec13aSThomas Abraham bool pl330_filter(struct dma_chan *chan, void *param)
2753e2ec13aSThomas Abraham {
276cd072515SThomas Abraham 	u8 *peri_id;
2773e2ec13aSThomas Abraham 
2783e2ec13aSThomas Abraham 	if (chan->device->dev->driver != &pl330_driver.drv)
2793e2ec13aSThomas Abraham 		return false;
2803e2ec13aSThomas Abraham 
28193ed5544SThomas Abraham #ifdef CONFIG_OF
28293ed5544SThomas Abraham 	if (chan->device->dev->of_node) {
28393ed5544SThomas Abraham 		const __be32 *prop_value;
28493ed5544SThomas Abraham 		phandle phandle;
28593ed5544SThomas Abraham 		struct device_node *node;
28693ed5544SThomas Abraham 
28793ed5544SThomas Abraham 		prop_value = ((struct property *)param)->value;
28893ed5544SThomas Abraham 		phandle = be32_to_cpup(prop_value++);
28993ed5544SThomas Abraham 		node = of_find_node_by_phandle(phandle);
29093ed5544SThomas Abraham 		return ((chan->private == node) &&
29193ed5544SThomas Abraham 				(chan->chan_id == be32_to_cpup(prop_value)));
29293ed5544SThomas Abraham 	}
29393ed5544SThomas Abraham #endif
29493ed5544SThomas Abraham 
295cd072515SThomas Abraham 	peri_id = chan->private;
296cd072515SThomas Abraham 	return *peri_id == (unsigned)param;
2973e2ec13aSThomas Abraham }
2983e2ec13aSThomas Abraham EXPORT_SYMBOL(pl330_filter);
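
/*
 * Illustrative only: in the non-DT case a slave driver would typically
 * acquire one of these channels by passing pl330_filter() and the desired
 * peripheral request number to dma_request_channel(), e.g.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 *
 * where peri_id is the platform-specific request signal number that gets
 * compared against the value behind chan->private above.
 */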
2993e2ec13aSThomas Abraham 
300b3040e40SJassi Brar static int pl330_alloc_chan_resources(struct dma_chan *chan)
301b3040e40SJassi Brar {
302b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
303b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = pch->dmac;
304b3040e40SJassi Brar 	unsigned long flags;
305b3040e40SJassi Brar 
306b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
307b3040e40SJassi Brar 
308b3040e40SJassi Brar 	pch->completed = chan->cookie = 1;
30942bc9cf4SBoojin Kim 	pch->cyclic = false;
310b3040e40SJassi Brar 
311b3040e40SJassi Brar 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
312b3040e40SJassi Brar 	if (!pch->pl330_chid) {
313b3040e40SJassi Brar 		spin_unlock_irqrestore(&pch->lock, flags);
314b3040e40SJassi Brar 		return 0;
315b3040e40SJassi Brar 	}
316b3040e40SJassi Brar 
317b3040e40SJassi Brar 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
318b3040e40SJassi Brar 
319b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
320b3040e40SJassi Brar 
321b3040e40SJassi Brar 	return 1;
322b3040e40SJassi Brar }
323b3040e40SJassi Brar 
324b3040e40SJassi Brar static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
325b3040e40SJassi Brar {
326b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
327ae43b886SBoojin Kim 	struct dma_pl330_desc *desc, *_dt;
328b3040e40SJassi Brar 	unsigned long flags;
3291d0c1d60SBoojin Kim 	struct dma_pl330_dmac *pdmac = pch->dmac;
3301d0c1d60SBoojin Kim 	struct dma_slave_config *slave_config;
331ae43b886SBoojin Kim 	LIST_HEAD(list);
332b3040e40SJassi Brar 
3331d0c1d60SBoojin Kim 	switch (cmd) {
3341d0c1d60SBoojin Kim 	case DMA_TERMINATE_ALL:
335b3040e40SJassi Brar 		spin_lock_irqsave(&pch->lock, flags);
336b3040e40SJassi Brar 
337b3040e40SJassi Brar 		/* FLUSH the PL330 Channel thread */
338b3040e40SJassi Brar 		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
339b3040e40SJassi Brar 
340b3040e40SJassi Brar 		/* Mark all desc done */
341ae43b886SBoojin Kim 		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
342b3040e40SJassi Brar 			desc->status = DONE;
343ae43b886SBoojin Kim 			pch->completed = desc->txd.cookie;
344ae43b886SBoojin Kim 			list_move_tail(&desc->node, &list);
345ae43b886SBoojin Kim 		}
346b3040e40SJassi Brar 
347ae43b886SBoojin Kim 		list_splice_tail_init(&list, &pdmac->desc_pool);
348b3040e40SJassi Brar 		spin_unlock_irqrestore(&pch->lock, flags);
3491d0c1d60SBoojin Kim 		break;
3501d0c1d60SBoojin Kim 	case DMA_SLAVE_CONFIG:
3511d0c1d60SBoojin Kim 		slave_config = (struct dma_slave_config *)arg;
352b3040e40SJassi Brar 
353db8196dfSVinod Koul 		if (slave_config->direction == DMA_MEM_TO_DEV) {
3541d0c1d60SBoojin Kim 			if (slave_config->dst_addr)
3551d0c1d60SBoojin Kim 				pch->fifo_addr = slave_config->dst_addr;
3561d0c1d60SBoojin Kim 			if (slave_config->dst_addr_width)
3571d0c1d60SBoojin Kim 				pch->burst_sz = __ffs(slave_config->dst_addr_width);
3581d0c1d60SBoojin Kim 			if (slave_config->dst_maxburst)
3591d0c1d60SBoojin Kim 				pch->burst_len = slave_config->dst_maxburst;
360db8196dfSVinod Koul 		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
3611d0c1d60SBoojin Kim 			if (slave_config->src_addr)
3621d0c1d60SBoojin Kim 				pch->fifo_addr = slave_config->src_addr;
3631d0c1d60SBoojin Kim 			if (slave_config->src_addr_width)
3641d0c1d60SBoojin Kim 				pch->burst_sz = __ffs(slave_config->src_addr_width);
3651d0c1d60SBoojin Kim 			if (slave_config->src_maxburst)
3661d0c1d60SBoojin Kim 				pch->burst_len = slave_config->src_maxburst;
3671d0c1d60SBoojin Kim 		}
3681d0c1d60SBoojin Kim 		break;
3691d0c1d60SBoojin Kim 	default:
3701d0c1d60SBoojin Kim 		dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
3711d0c1d60SBoojin Kim 		return -ENXIO;
3721d0c1d60SBoojin Kim 	}
373b3040e40SJassi Brar 
374b3040e40SJassi Brar 	return 0;
375b3040e40SJassi Brar }
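
/*
 * A minimal sketch (values are illustrative, not taken from this file) of
 * how a client could hand the slave parameters used above to this driver:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * This lands in the DMA_SLAVE_CONFIG case and records fifo_addr, burst_sz
 * (as log2 of the bus width in bytes) and burst_len for the channel.
 */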
376b3040e40SJassi Brar 
377b3040e40SJassi Brar static void pl330_free_chan_resources(struct dma_chan *chan)
378b3040e40SJassi Brar {
379b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
380b3040e40SJassi Brar 	unsigned long flags;
381b3040e40SJassi Brar 
382b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
383b3040e40SJassi Brar 
384b3040e40SJassi Brar 	tasklet_kill(&pch->task);
385b3040e40SJassi Brar 
386b3040e40SJassi Brar 	pl330_release_channel(pch->pl330_chid);
387b3040e40SJassi Brar 	pch->pl330_chid = NULL;
388b3040e40SJassi Brar 
38942bc9cf4SBoojin Kim 	if (pch->cyclic)
39042bc9cf4SBoojin Kim 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
39142bc9cf4SBoojin Kim 
392b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
393b3040e40SJassi Brar }
394b3040e40SJassi Brar 
395b3040e40SJassi Brar static enum dma_status
396b3040e40SJassi Brar pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
397b3040e40SJassi Brar 		 struct dma_tx_state *txstate)
398b3040e40SJassi Brar {
399b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
400b3040e40SJassi Brar 	dma_cookie_t last_done, last_used;
401b3040e40SJassi Brar 	int ret;
402b3040e40SJassi Brar 
403b3040e40SJassi Brar 	last_done = pch->completed;
404b3040e40SJassi Brar 	last_used = chan->cookie;
405b3040e40SJassi Brar 
406b3040e40SJassi Brar 	ret = dma_async_is_complete(cookie, last_done, last_used);
407b3040e40SJassi Brar 
408b3040e40SJassi Brar 	dma_set_tx_state(txstate, last_done, last_used, 0);
409b3040e40SJassi Brar 
410b3040e40SJassi Brar 	return ret;
411b3040e40SJassi Brar }
412b3040e40SJassi Brar 
413b3040e40SJassi Brar static void pl330_issue_pending(struct dma_chan *chan)
414b3040e40SJassi Brar {
415b3040e40SJassi Brar 	pl330_tasklet((unsigned long) to_pchan(chan));
416b3040e40SJassi Brar }
417b3040e40SJassi Brar 
418b3040e40SJassi Brar /*
419b3040e40SJassi Brar  * We returned the last one of the circular list of descriptor(s)
420b3040e40SJassi Brar  * from prep_xxx, so the argument to submit corresponds to the last
421b3040e40SJassi Brar  * descriptor of the list.
422b3040e40SJassi Brar  */
423b3040e40SJassi Brar static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
424b3040e40SJassi Brar {
425b3040e40SJassi Brar 	struct dma_pl330_desc *desc, *last = to_desc(tx);
426b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(tx->chan);
427b3040e40SJassi Brar 	dma_cookie_t cookie;
428b3040e40SJassi Brar 	unsigned long flags;
429b3040e40SJassi Brar 
430b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
431b3040e40SJassi Brar 
432b3040e40SJassi Brar 	/* Assign cookies to all nodes */
433b3040e40SJassi Brar 	cookie = tx->chan->cookie;
434b3040e40SJassi Brar 
435b3040e40SJassi Brar 	while (!list_empty(&last->node)) {
436b3040e40SJassi Brar 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
437b3040e40SJassi Brar 
438b3040e40SJassi Brar 		if (++cookie < 0)
439b3040e40SJassi Brar 			cookie = 1;
440b3040e40SJassi Brar 		desc->txd.cookie = cookie;
441b3040e40SJassi Brar 
442b3040e40SJassi Brar 		list_move_tail(&desc->node, &pch->work_list);
443b3040e40SJassi Brar 	}
444b3040e40SJassi Brar 
445b3040e40SJassi Brar 	if (++cookie < 0)
446b3040e40SJassi Brar 		cookie = 1;
447b3040e40SJassi Brar 	last->txd.cookie = cookie;
448b3040e40SJassi Brar 
449b3040e40SJassi Brar 	list_add_tail(&last->node, &pch->work_list);
450b3040e40SJassi Brar 
451b3040e40SJassi Brar 	tx->chan->cookie = cookie;
452b3040e40SJassi Brar 
453b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
454b3040e40SJassi Brar 
455b3040e40SJassi Brar 	return cookie;
456b3040e40SJassi Brar }
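
/*
 * Note: the prep_xxx routines chain the descriptors of one request into a
 * circular list headed at the first descriptor and hand back the last one,
 * so the walk above picks them up starting from the first, assigns
 * monotonically increasing cookies, moves each onto the channel's work_list
 * in order and finally appends the returned (last) descriptor itself.
 */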
457b3040e40SJassi Brar 
458b3040e40SJassi Brar static inline void _init_desc(struct dma_pl330_desc *desc)
459b3040e40SJassi Brar {
460b3040e40SJassi Brar 	desc->pchan = NULL;
461b3040e40SJassi Brar 	desc->req.x = &desc->px;
462b3040e40SJassi Brar 	desc->req.token = desc;
463b3040e40SJassi Brar 	desc->rqcfg.swap = SWAP_NO;
464b3040e40SJassi Brar 	desc->rqcfg.privileged = 0;
465b3040e40SJassi Brar 	desc->rqcfg.insnaccess = 0;
466b3040e40SJassi Brar 	desc->rqcfg.scctl = SCCTRL0;
467b3040e40SJassi Brar 	desc->rqcfg.dcctl = DCCTRL0;
468b3040e40SJassi Brar 	desc->req.cfg = &desc->rqcfg;
469b3040e40SJassi Brar 	desc->req.xfer_cb = dma_pl330_rqcb;
470b3040e40SJassi Brar 	desc->txd.tx_submit = pl330_tx_submit;
471b3040e40SJassi Brar 
472b3040e40SJassi Brar 	INIT_LIST_HEAD(&desc->node);
473b3040e40SJassi Brar }
474b3040e40SJassi Brar 
475b3040e40SJassi Brar /* Returns the number of descriptors added to the DMAC pool */
476b3040e40SJassi Brar int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
477b3040e40SJassi Brar {
478b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
479b3040e40SJassi Brar 	unsigned long flags;
480b3040e40SJassi Brar 	int i;
481b3040e40SJassi Brar 
482b3040e40SJassi Brar 	if (!pdmac)
483b3040e40SJassi Brar 		return 0;
484b3040e40SJassi Brar 
485b3040e40SJassi Brar 	desc = kmalloc(count * sizeof(*desc), flg);
486b3040e40SJassi Brar 	if (!desc)
487b3040e40SJassi Brar 		return 0;
488b3040e40SJassi Brar 
489b3040e40SJassi Brar 	spin_lock_irqsave(&pdmac->pool_lock, flags);
490b3040e40SJassi Brar 
491b3040e40SJassi Brar 	for (i = 0; i < count; i++) {
492b3040e40SJassi Brar 		_init_desc(&desc[i]);
493b3040e40SJassi Brar 		list_add_tail(&desc[i].node, &pdmac->desc_pool);
494b3040e40SJassi Brar 	}
495b3040e40SJassi Brar 
496b3040e40SJassi Brar 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
497b3040e40SJassi Brar 
498b3040e40SJassi Brar 	return count;
499b3040e40SJassi Brar }
500b3040e40SJassi Brar 
501b3040e40SJassi Brar static struct dma_pl330_desc *
502b3040e40SJassi Brar pluck_desc(struct dma_pl330_dmac *pdmac)
503b3040e40SJassi Brar {
504b3040e40SJassi Brar 	struct dma_pl330_desc *desc = NULL;
505b3040e40SJassi Brar 	unsigned long flags;
506b3040e40SJassi Brar 
507b3040e40SJassi Brar 	if (!pdmac)
508b3040e40SJassi Brar 		return NULL;
509b3040e40SJassi Brar 
510b3040e40SJassi Brar 	spin_lock_irqsave(&pdmac->pool_lock, flags);
511b3040e40SJassi Brar 
512b3040e40SJassi Brar 	if (!list_empty(&pdmac->desc_pool)) {
513b3040e40SJassi Brar 		desc = list_entry(pdmac->desc_pool.next,
514b3040e40SJassi Brar 				struct dma_pl330_desc, node);
515b3040e40SJassi Brar 
516b3040e40SJassi Brar 		list_del_init(&desc->node);
517b3040e40SJassi Brar 
518b3040e40SJassi Brar 		desc->status = PREP;
519b3040e40SJassi Brar 		desc->txd.callback = NULL;
520b3040e40SJassi Brar 	}
521b3040e40SJassi Brar 
522b3040e40SJassi Brar 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
523b3040e40SJassi Brar 
524b3040e40SJassi Brar 	return desc;
525b3040e40SJassi Brar }
526b3040e40SJassi Brar 
527b3040e40SJassi Brar static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
528b3040e40SJassi Brar {
529b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = pch->dmac;
530cd072515SThomas Abraham 	u8 *peri_id = pch->chan.private;
531b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
532b3040e40SJassi Brar 
533b3040e40SJassi Brar 	/* Pluck one desc from the pool of DMAC */
534b3040e40SJassi Brar 	desc = pluck_desc(pdmac);
535b3040e40SJassi Brar 
536b3040e40SJassi Brar 	/* If the DMAC pool is empty, alloc new */
537b3040e40SJassi Brar 	if (!desc) {
538b3040e40SJassi Brar 		if (!add_desc(pdmac, GFP_ATOMIC, 1))
539b3040e40SJassi Brar 			return NULL;
540b3040e40SJassi Brar 
541b3040e40SJassi Brar 		/* Try again */
542b3040e40SJassi Brar 		desc = pluck_desc(pdmac);
543b3040e40SJassi Brar 		if (!desc) {
544b3040e40SJassi Brar 			dev_err(pch->dmac->pif.dev,
545b3040e40SJassi Brar 				"%s:%d ALERT!\n", __func__, __LINE__);
546b3040e40SJassi Brar 			return NULL;
547b3040e40SJassi Brar 		}
548b3040e40SJassi Brar 	}
549b3040e40SJassi Brar 
550b3040e40SJassi Brar 	/* Initialize the descriptor */
551b3040e40SJassi Brar 	desc->pchan = pch;
552b3040e40SJassi Brar 	desc->txd.cookie = 0;
553b3040e40SJassi Brar 	async_tx_ack(&desc->txd);
554b3040e40SJassi Brar 
555cd072515SThomas Abraham 	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
556b3040e40SJassi Brar 
557b3040e40SJassi Brar 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
558b3040e40SJassi Brar 
559b3040e40SJassi Brar 	return desc;
560b3040e40SJassi Brar }
561b3040e40SJassi Brar 
562b3040e40SJassi Brar static inline void fill_px(struct pl330_xfer *px,
563b3040e40SJassi Brar 		dma_addr_t dst, dma_addr_t src, size_t len)
564b3040e40SJassi Brar {
565b3040e40SJassi Brar 	px->next = NULL;
566b3040e40SJassi Brar 	px->bytes = len;
567b3040e40SJassi Brar 	px->dst_addr = dst;
568b3040e40SJassi Brar 	px->src_addr = src;
569b3040e40SJassi Brar }
570b3040e40SJassi Brar 
571b3040e40SJassi Brar static struct dma_pl330_desc *
572b3040e40SJassi Brar __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
573b3040e40SJassi Brar 		dma_addr_t src, size_t len)
574b3040e40SJassi Brar {
575b3040e40SJassi Brar 	struct dma_pl330_desc *desc = pl330_get_desc(pch);
576b3040e40SJassi Brar 
577b3040e40SJassi Brar 	if (!desc) {
578b3040e40SJassi Brar 		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
579b3040e40SJassi Brar 			__func__, __LINE__);
580b3040e40SJassi Brar 		return NULL;
581b3040e40SJassi Brar 	}
582b3040e40SJassi Brar 
583b3040e40SJassi Brar 	/*
584b3040e40SJassi Brar 	 * Ideally we should look out for reqs bigger than
585b3040e40SJassi Brar 	 * those that can be programmed with 256 bytes of
586b3040e40SJassi Brar 	 * MC buffer, but considering a req size is seldom
587b3040e40SJassi Brar 	 * going to be word-unaligned and more than 200MB,
588b3040e40SJassi Brar 	 * we take it easy.
589b3040e40SJassi Brar 	 * Also, should the limit be reached we'd rather
590b3040e40SJassi Brar 	 * have the platform increase MC buffer size than
591b3040e40SJassi Brar 	 * complicate this API driver.
592b3040e40SJassi Brar 	 */
593b3040e40SJassi Brar 	fill_px(&desc->px, dst, src, len);
594b3040e40SJassi Brar 
595b3040e40SJassi Brar 	return desc;
596b3040e40SJassi Brar }
597b3040e40SJassi Brar 
598b3040e40SJassi Brar /* Call after fixing burst size */
599b3040e40SJassi Brar static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
600b3040e40SJassi Brar {
601b3040e40SJassi Brar 	struct dma_pl330_chan *pch = desc->pchan;
602b3040e40SJassi Brar 	struct pl330_info *pi = &pch->dmac->pif;
603b3040e40SJassi Brar 	int burst_len;
604b3040e40SJassi Brar 
605b3040e40SJassi Brar 	burst_len = pi->pcfg.data_bus_width / 8;
606b3040e40SJassi Brar 	burst_len *= pi->pcfg.data_buf_dep;
607b3040e40SJassi Brar 	burst_len >>= desc->rqcfg.brst_size;
608b3040e40SJassi Brar 
609b3040e40SJassi Brar 	/* src/dst_burst_len can't be more than 16 */
610b3040e40SJassi Brar 	if (burst_len > 16)
611b3040e40SJassi Brar 		burst_len = 16;
612b3040e40SJassi Brar 
613b3040e40SJassi Brar 	while (burst_len > 1) {
614b3040e40SJassi Brar 		if (!(len % (burst_len << desc->rqcfg.brst_size)))
615b3040e40SJassi Brar 			break;
616b3040e40SJassi Brar 		burst_len--;
617b3040e40SJassi Brar 	}
618b3040e40SJassi Brar 
619b3040e40SJassi Brar 	return burst_len;
620b3040e40SJassi Brar }
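
/*
 * Worked example (assumed configuration): with a 64-bit data bus and a
 * 16-line data buffer the starting value is (64/8) * 16 = 128 bursts;
 * for brst_size = 2 (4-byte beats) that becomes 128 >> 2 = 32, clamped
 * to 16.  For len = 96 bytes the loop then drops to the largest burst
 * length that divides the transfer: 16 * 4 = 64 down to 13 * 4 = 52 all
 * leave a remainder, so 12 is returned (12 * 4 = 48 and 96 % 48 == 0).
 */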
621b3040e40SJassi Brar 
62242bc9cf4SBoojin Kim static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
62342bc9cf4SBoojin Kim 		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
624db8196dfSVinod Koul 		size_t period_len, enum dma_transfer_direction direction)
62542bc9cf4SBoojin Kim {
62642bc9cf4SBoojin Kim 	struct dma_pl330_desc *desc;
62742bc9cf4SBoojin Kim 	struct dma_pl330_chan *pch = to_pchan(chan);
62842bc9cf4SBoojin Kim 	dma_addr_t dst;
62942bc9cf4SBoojin Kim 	dma_addr_t src;
63042bc9cf4SBoojin Kim 
63142bc9cf4SBoojin Kim 	desc = pl330_get_desc(pch);
63242bc9cf4SBoojin Kim 	if (!desc) {
63342bc9cf4SBoojin Kim 		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
63442bc9cf4SBoojin Kim 			__func__, __LINE__);
63542bc9cf4SBoojin Kim 		return NULL;
63642bc9cf4SBoojin Kim 	}
63742bc9cf4SBoojin Kim 
63842bc9cf4SBoojin Kim 	switch (direction) {
639db8196dfSVinod Koul 	case DMA_MEM_TO_DEV:
64042bc9cf4SBoojin Kim 		desc->rqcfg.src_inc = 1;
64142bc9cf4SBoojin Kim 		desc->rqcfg.dst_inc = 0;
642cd072515SThomas Abraham 		desc->req.rqtype = MEMTODEV;
64342bc9cf4SBoojin Kim 		src = dma_addr;
64442bc9cf4SBoojin Kim 		dst = pch->fifo_addr;
64542bc9cf4SBoojin Kim 		break;
646db8196dfSVinod Koul 	case DMA_DEV_TO_MEM:
64742bc9cf4SBoojin Kim 		desc->rqcfg.src_inc = 0;
64842bc9cf4SBoojin Kim 		desc->rqcfg.dst_inc = 1;
649cd072515SThomas Abraham 		desc->req.rqtype = DEVTOMEM;
65042bc9cf4SBoojin Kim 		src = pch->fifo_addr;
65142bc9cf4SBoojin Kim 		dst = dma_addr;
65242bc9cf4SBoojin Kim 		break;
65342bc9cf4SBoojin Kim 	default:
65442bc9cf4SBoojin Kim 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
65542bc9cf4SBoojin Kim 		__func__, __LINE__);
65642bc9cf4SBoojin Kim 		return NULL;
65742bc9cf4SBoojin Kim 	}
65842bc9cf4SBoojin Kim 
65942bc9cf4SBoojin Kim 	desc->rqcfg.brst_size = pch->burst_sz;
66042bc9cf4SBoojin Kim 	desc->rqcfg.brst_len = 1;
66142bc9cf4SBoojin Kim 
66242bc9cf4SBoojin Kim 	pch->cyclic = true;
66342bc9cf4SBoojin Kim 
66442bc9cf4SBoojin Kim 	fill_px(&desc->px, dst, src, period_len);
66542bc9cf4SBoojin Kim 
66642bc9cf4SBoojin Kim 	return &desc->txd;
66742bc9cf4SBoojin Kim }
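
/*
 * Note that only a single descriptor covering the first period_len bytes
 * is created here; the cyclic handling in pl330_tasklet() re-queues that
 * same descriptor after each completion, so the transfer keeps repeating
 * over that one period (the len argument is not otherwise used).
 */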
66842bc9cf4SBoojin Kim 
669b3040e40SJassi Brar static struct dma_async_tx_descriptor *
670b3040e40SJassi Brar pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
671b3040e40SJassi Brar 		dma_addr_t src, size_t len, unsigned long flags)
672b3040e40SJassi Brar {
673b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
674b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
675b3040e40SJassi Brar 	struct pl330_info *pi;
676b3040e40SJassi Brar 	int burst;
677b3040e40SJassi Brar 
6784e0e6109SRob Herring 	if (unlikely(!pch || !len))
679b3040e40SJassi Brar 		return NULL;
680b3040e40SJassi Brar 
681b3040e40SJassi Brar 	pi = &pch->dmac->pif;
682b3040e40SJassi Brar 
683b3040e40SJassi Brar 	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
684b3040e40SJassi Brar 	if (!desc)
685b3040e40SJassi Brar 		return NULL;
686b3040e40SJassi Brar 
687b3040e40SJassi Brar 	desc->rqcfg.src_inc = 1;
688b3040e40SJassi Brar 	desc->rqcfg.dst_inc = 1;
689cd072515SThomas Abraham 	desc->req.rqtype = MEMTOMEM;
690b3040e40SJassi Brar 
691b3040e40SJassi Brar 	/* Select max possible burst size */
692b3040e40SJassi Brar 	burst = pi->pcfg.data_bus_width / 8;
693b3040e40SJassi Brar 
694b3040e40SJassi Brar 	while (burst > 1) {
695b3040e40SJassi Brar 		if (!(len % burst))
696b3040e40SJassi Brar 			break;
697b3040e40SJassi Brar 		burst /= 2;
698b3040e40SJassi Brar 	}
699b3040e40SJassi Brar 
700b3040e40SJassi Brar 	desc->rqcfg.brst_size = 0;
701b3040e40SJassi Brar 	while (burst != (1 << desc->rqcfg.brst_size))
702b3040e40SJassi Brar 		desc->rqcfg.brst_size++;
703b3040e40SJassi Brar 
704b3040e40SJassi Brar 	desc->rqcfg.brst_len = get_burst_len(desc, len);
705b3040e40SJassi Brar 
706b3040e40SJassi Brar 	desc->txd.flags = flags;
707b3040e40SJassi Brar 
708b3040e40SJassi Brar 	return &desc->txd;
709b3040e40SJassi Brar }
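
/*
 * Worked example (assumed configuration): on a 32-bit data bus the burst
 * width starts at 32/8 = 4 bytes; for len = 6 the loop halves it to 2
 * (6 % 4 != 0 but 6 % 2 == 0), so brst_size ends up as 1 (2 == 1 << 1)
 * and brst_len is then derived from that width by get_burst_len() above.
 */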
710b3040e40SJassi Brar 
711b3040e40SJassi Brar static struct dma_async_tx_descriptor *
712b3040e40SJassi Brar pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
713db8196dfSVinod Koul 		unsigned int sg_len, enum dma_transfer_direction direction,
714b3040e40SJassi Brar 		unsigned long flg)
715b3040e40SJassi Brar {
716b3040e40SJassi Brar 	struct dma_pl330_desc *first, *desc = NULL;
717b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
718b3040e40SJassi Brar 	struct scatterlist *sg;
719b3040e40SJassi Brar 	unsigned long flags;
7201b9bb715SBoojin Kim 	int i;
721b3040e40SJassi Brar 	dma_addr_t addr;
722b3040e40SJassi Brar 
723cd072515SThomas Abraham 	if (unlikely(!pch || !sgl || !sg_len))
724b3040e40SJassi Brar 		return NULL;
725b3040e40SJassi Brar 
7261b9bb715SBoojin Kim 	addr = pch->fifo_addr;
727b3040e40SJassi Brar 
728b3040e40SJassi Brar 	first = NULL;
729b3040e40SJassi Brar 
730b3040e40SJassi Brar 	for_each_sg(sgl, sg, sg_len, i) {
731b3040e40SJassi Brar 
732b3040e40SJassi Brar 		desc = pl330_get_desc(pch);
733b3040e40SJassi Brar 		if (!desc) {
734b3040e40SJassi Brar 			struct dma_pl330_dmac *pdmac = pch->dmac;
735b3040e40SJassi Brar 
736b3040e40SJassi Brar 			dev_err(pch->dmac->pif.dev,
737b3040e40SJassi Brar 				"%s:%d Unable to fetch desc\n",
738b3040e40SJassi Brar 				__func__, __LINE__);
739b3040e40SJassi Brar 			if (!first)
740b3040e40SJassi Brar 				return NULL;
741b3040e40SJassi Brar 
742b3040e40SJassi Brar 			spin_lock_irqsave(&pdmac->pool_lock, flags);
743b3040e40SJassi Brar 
744b3040e40SJassi Brar 			while (!list_empty(&first->node)) {
745b3040e40SJassi Brar 				desc = list_entry(first->node.next,
746b3040e40SJassi Brar 						struct dma_pl330_desc, node);
747b3040e40SJassi Brar 				list_move_tail(&desc->node, &pdmac->desc_pool);
748b3040e40SJassi Brar 			}
749b3040e40SJassi Brar 
750b3040e40SJassi Brar 			list_move_tail(&first->node, &pdmac->desc_pool);
751b3040e40SJassi Brar 
752b3040e40SJassi Brar 			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
753b3040e40SJassi Brar 
754b3040e40SJassi Brar 			return NULL;
755b3040e40SJassi Brar 		}
756b3040e40SJassi Brar 
757b3040e40SJassi Brar 		if (!first)
758b3040e40SJassi Brar 			first = desc;
759b3040e40SJassi Brar 		else
760b3040e40SJassi Brar 			list_add_tail(&desc->node, &first->node);
761b3040e40SJassi Brar 
762db8196dfSVinod Koul 		if (direction == DMA_MEM_TO_DEV) {
763b3040e40SJassi Brar 			desc->rqcfg.src_inc = 1;
764b3040e40SJassi Brar 			desc->rqcfg.dst_inc = 0;
765cd072515SThomas Abraham 			desc->req.rqtype = MEMTODEV;
766b3040e40SJassi Brar 			fill_px(&desc->px,
767b3040e40SJassi Brar 				addr, sg_dma_address(sg), sg_dma_len(sg));
768b3040e40SJassi Brar 		} else {
769b3040e40SJassi Brar 			desc->rqcfg.src_inc = 0;
770b3040e40SJassi Brar 			desc->rqcfg.dst_inc = 1;
771cd072515SThomas Abraham 			desc->req.rqtype = DEVTOMEM;
772b3040e40SJassi Brar 			fill_px(&desc->px,
773b3040e40SJassi Brar 				sg_dma_address(sg), addr, sg_dma_len(sg));
774b3040e40SJassi Brar 		}
775b3040e40SJassi Brar 
7761b9bb715SBoojin Kim 		desc->rqcfg.brst_size = pch->burst_sz;
777b3040e40SJassi Brar 		desc->rqcfg.brst_len = 1;
778b3040e40SJassi Brar 	}
779b3040e40SJassi Brar 
780b3040e40SJassi Brar 	/* Return the last desc in the chain */
781b3040e40SJassi Brar 	desc->txd.flags = flg;
782b3040e40SJassi Brar 	return &desc->txd;
783b3040e40SJassi Brar }
784b3040e40SJassi Brar 
785b3040e40SJassi Brar static irqreturn_t pl330_irq_handler(int irq, void *data)
786b3040e40SJassi Brar {
787b3040e40SJassi Brar 	if (pl330_update(data))
788b3040e40SJassi Brar 		return IRQ_HANDLED;
789b3040e40SJassi Brar 	else
790b3040e40SJassi Brar 		return IRQ_NONE;
791b3040e40SJassi Brar }
792b3040e40SJassi Brar 
793b3040e40SJassi Brar static int __devinit
794aa25afadSRussell King pl330_probe(struct amba_device *adev, const struct amba_id *id)
795b3040e40SJassi Brar {
796b3040e40SJassi Brar 	struct dma_pl330_platdata *pdat;
797b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac;
798b3040e40SJassi Brar 	struct dma_pl330_chan *pch;
799b3040e40SJassi Brar 	struct pl330_info *pi;
800b3040e40SJassi Brar 	struct dma_device *pd;
801b3040e40SJassi Brar 	struct resource *res;
802b3040e40SJassi Brar 	int i, ret, irq;
8034e0e6109SRob Herring 	int num_chan;
804b3040e40SJassi Brar 
805b3040e40SJassi Brar 	pdat = adev->dev.platform_data;
806b3040e40SJassi Brar 
807b3040e40SJassi Brar 	/* Allocate a new DMAC and its Channels */
8084e0e6109SRob Herring 	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
809b3040e40SJassi Brar 	if (!pdmac) {
810b3040e40SJassi Brar 		dev_err(&adev->dev, "unable to allocate mem\n");
811b3040e40SJassi Brar 		return -ENOMEM;
812b3040e40SJassi Brar 	}
813b3040e40SJassi Brar 
814b3040e40SJassi Brar 	pi = &pdmac->pif;
815b3040e40SJassi Brar 	pi->dev = &adev->dev;
816b3040e40SJassi Brar 	pi->pl330_data = NULL;
8174e0e6109SRob Herring 	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
818b3040e40SJassi Brar 
819b3040e40SJassi Brar 	res = &adev->res;
820b3040e40SJassi Brar 	request_mem_region(res->start, resource_size(res), "dma-pl330");
821b3040e40SJassi Brar 
822b3040e40SJassi Brar 	pi->base = ioremap(res->start, resource_size(res));
823b3040e40SJassi Brar 	if (!pi->base) {
824b3040e40SJassi Brar 		ret = -ENXIO;
825b3040e40SJassi Brar 		goto probe_err1;
826b3040e40SJassi Brar 	}
827b3040e40SJassi Brar 
828a2f5203fSBoojin Kim 	pdmac->clk = clk_get(&adev->dev, "dma");
829a2f5203fSBoojin Kim 	if (IS_ERR(pdmac->clk)) {
830a2f5203fSBoojin Kim 		dev_err(&adev->dev, "Cannot get operation clock.\n");
831a2f5203fSBoojin Kim 		ret = -EINVAL;
832a2f5203fSBoojin Kim 		goto probe_err1;
833a2f5203fSBoojin Kim 	}
834a2f5203fSBoojin Kim 
835a2f5203fSBoojin Kim 	amba_set_drvdata(adev, pdmac);
836a2f5203fSBoojin Kim 
8373506c0d5STushar Behera #ifndef CONFIG_PM_RUNTIME
838a2f5203fSBoojin Kim 	/* enable dma clk */
839a2f5203fSBoojin Kim 	clk_enable(pdmac->clk);
840a2f5203fSBoojin Kim #endif
841a2f5203fSBoojin Kim 
842b3040e40SJassi Brar 	irq = adev->irq[0];
843b3040e40SJassi Brar 	ret = request_irq(irq, pl330_irq_handler, 0,
844b3040e40SJassi Brar 			dev_name(&adev->dev), pi);
845b3040e40SJassi Brar 	if (ret)
846b3040e40SJassi Brar 		goto probe_err2;
847b3040e40SJassi Brar 
848b3040e40SJassi Brar 	ret = pl330_add(pi);
849b3040e40SJassi Brar 	if (ret)
850b3040e40SJassi Brar 		goto probe_err3;
851b3040e40SJassi Brar 
852b3040e40SJassi Brar 	INIT_LIST_HEAD(&pdmac->desc_pool);
853b3040e40SJassi Brar 	spin_lock_init(&pdmac->pool_lock);
854b3040e40SJassi Brar 
855b3040e40SJassi Brar 	/* Create a descriptor pool of default size */
856b3040e40SJassi Brar 	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
857b3040e40SJassi Brar 		dev_warn(&adev->dev, "unable to allocate desc\n");
858b3040e40SJassi Brar 
859b3040e40SJassi Brar 	pd = &pdmac->ddma;
860b3040e40SJassi Brar 	INIT_LIST_HEAD(&pd->channels);
861b3040e40SJassi Brar 
862b3040e40SJassi Brar 	/* Initialize channel parameters */
86393ed5544SThomas Abraham 	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
86493ed5544SThomas Abraham 			(u8)pi->pcfg.num_chan);
8654e0e6109SRob Herring 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err4;
	}
8664e0e6109SRob Herring 
8674e0e6109SRob Herring 	for (i = 0; i < num_chan; i++) {
868b3040e40SJassi Brar 		pch = &pdmac->peripherals[i];
86993ed5544SThomas Abraham 		if (!adev->dev.of_node)
870cd072515SThomas Abraham 			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
87193ed5544SThomas Abraham 		else
87293ed5544SThomas Abraham 			pch->chan.private = adev->dev.of_node;
873b3040e40SJassi Brar 
874b3040e40SJassi Brar 		INIT_LIST_HEAD(&pch->work_list);
875b3040e40SJassi Brar 		spin_lock_init(&pch->lock);
876b3040e40SJassi Brar 		pch->pl330_chid = NULL;
877b3040e40SJassi Brar 		pch->chan.device = pd;
878b3040e40SJassi Brar 		pch->dmac = pdmac;
879b3040e40SJassi Brar 
880b3040e40SJassi Brar 		/* Add the channel to the DMAC list */
881b3040e40SJassi Brar 		list_add_tail(&pch->chan.device_node, &pd->channels);
882b3040e40SJassi Brar 	}
883b3040e40SJassi Brar 
884b3040e40SJassi Brar 	pd->dev = &adev->dev;
88593ed5544SThomas Abraham 	if (pdat) {
886cd072515SThomas Abraham 		pd->cap_mask = pdat->cap_mask;
88793ed5544SThomas Abraham 	} else {
888cd072515SThomas Abraham 		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
88993ed5544SThomas Abraham 		if (pi->pcfg.num_peri) {
89093ed5544SThomas Abraham 			dma_cap_set(DMA_SLAVE, pd->cap_mask);
89193ed5544SThomas Abraham 			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
89293ed5544SThomas Abraham 		}
89393ed5544SThomas Abraham 	}
894b3040e40SJassi Brar 
895b3040e40SJassi Brar 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
896b3040e40SJassi Brar 	pd->device_free_chan_resources = pl330_free_chan_resources;
897b3040e40SJassi Brar 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
89842bc9cf4SBoojin Kim 	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
899b3040e40SJassi Brar 	pd->device_tx_status = pl330_tx_status;
900b3040e40SJassi Brar 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
901b3040e40SJassi Brar 	pd->device_control = pl330_control;
902b3040e40SJassi Brar 	pd->device_issue_pending = pl330_issue_pending;
903b3040e40SJassi Brar 
904b3040e40SJassi Brar 	ret = dma_async_device_register(pd);
905b3040e40SJassi Brar 	if (ret) {
906b3040e40SJassi Brar 		dev_err(&adev->dev, "unable to register DMAC\n");
907b3040e40SJassi Brar 		goto probe_err4;
908b3040e40SJassi Brar 	}
909b3040e40SJassi Brar 
910b3040e40SJassi Brar 	dev_info(&adev->dev,
911b3040e40SJassi Brar 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
912b3040e40SJassi Brar 	dev_info(&adev->dev,
913b3040e40SJassi Brar 		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
914b3040e40SJassi Brar 		pi->pcfg.data_buf_dep,
915b3040e40SJassi Brar 		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
916b3040e40SJassi Brar 		pi->pcfg.num_peri, pi->pcfg.num_events);
917b3040e40SJassi Brar 
918b3040e40SJassi Brar 	return 0;
919b3040e40SJassi Brar 
920b3040e40SJassi Brar probe_err4:
921b3040e40SJassi Brar 	pl330_del(pi);
922b3040e40SJassi Brar probe_err3:
923b3040e40SJassi Brar 	free_irq(irq, pi);
924b3040e40SJassi Brar probe_err2:
925b3040e40SJassi Brar 	iounmap(pi->base);
926b3040e40SJassi Brar probe_err1:
927b3040e40SJassi Brar 	release_mem_region(res->start, resource_size(res));
928b3040e40SJassi Brar 	kfree(pdmac);
929b3040e40SJassi Brar 
930b3040e40SJassi Brar 	return ret;
931b3040e40SJassi Brar }
932b3040e40SJassi Brar 
933b3040e40SJassi Brar static int __devexit pl330_remove(struct amba_device *adev)
934b3040e40SJassi Brar {
935b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
936b3040e40SJassi Brar 	struct dma_pl330_chan *pch, *_p;
937b3040e40SJassi Brar 	struct pl330_info *pi;
938b3040e40SJassi Brar 	struct resource *res;
939b3040e40SJassi Brar 	int irq;
940b3040e40SJassi Brar 
941b3040e40SJassi Brar 	if (!pdmac)
942b3040e40SJassi Brar 		return 0;
943b3040e40SJassi Brar 
944b3040e40SJassi Brar 	amba_set_drvdata(adev, NULL);
945b3040e40SJassi Brar 
946b3040e40SJassi Brar 	/* Idle the DMAC */
947b3040e40SJassi Brar 	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
948b3040e40SJassi Brar 			chan.device_node) {
949b3040e40SJassi Brar 
950b3040e40SJassi Brar 		/* Remove the channel */
951b3040e40SJassi Brar 		list_del(&pch->chan.device_node);
952b3040e40SJassi Brar 
953b3040e40SJassi Brar 		/* Flush the channel */
954b3040e40SJassi Brar 		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
955b3040e40SJassi Brar 		pl330_free_chan_resources(&pch->chan);
956b3040e40SJassi Brar 	}
957b3040e40SJassi Brar 
958b3040e40SJassi Brar 	pi = &pdmac->pif;
959b3040e40SJassi Brar 
960b3040e40SJassi Brar 	pl330_del(pi);
961b3040e40SJassi Brar 
962b3040e40SJassi Brar 	irq = adev->irq[0];
963b3040e40SJassi Brar 	free_irq(irq, pi);
964b3040e40SJassi Brar 
965b3040e40SJassi Brar 	iounmap(pi->base);
966b3040e40SJassi Brar 
967b3040e40SJassi Brar 	res = &adev->res;
968b3040e40SJassi Brar 	release_mem_region(res->start, resource_size(res));
969b3040e40SJassi Brar 
9703506c0d5STushar Behera #ifndef CONFIG_PM_RUNTIME
971a2f5203fSBoojin Kim 	clk_disable(pdmac->clk);
972a2f5203fSBoojin Kim #endif
973a2f5203fSBoojin Kim 
974b3040e40SJassi Brar 	kfree(pdmac);
975b3040e40SJassi Brar 
976b3040e40SJassi Brar 	return 0;
977b3040e40SJassi Brar }
978b3040e40SJassi Brar 
979b3040e40SJassi Brar static struct amba_id pl330_ids[] = {
980b3040e40SJassi Brar 	{
981b3040e40SJassi Brar 		.id	= 0x00041330,
982b3040e40SJassi Brar 		.mask	= 0x000fffff,
983b3040e40SJassi Brar 	},
984b3040e40SJassi Brar 	{ 0, 0 },
985b3040e40SJassi Brar };
986b3040e40SJassi Brar 
987e8fa516aSDave Martin MODULE_DEVICE_TABLE(amba, pl330_ids);
988e8fa516aSDave Martin 
989a2f5203fSBoojin Kim #ifdef CONFIG_PM_RUNTIME
990a2f5203fSBoojin Kim static int pl330_runtime_suspend(struct device *dev)
991a2f5203fSBoojin Kim {
992a2f5203fSBoojin Kim 	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
993a2f5203fSBoojin Kim 
994a2f5203fSBoojin Kim 	if (!pdmac) {
995a2f5203fSBoojin Kim 		dev_err(dev, "failed to get dmac\n");
996a2f5203fSBoojin Kim 		return -ENODEV;
997a2f5203fSBoojin Kim 	}
998a2f5203fSBoojin Kim 
999a2f5203fSBoojin Kim 	clk_disable(pdmac->clk);
1000a2f5203fSBoojin Kim 
1001a2f5203fSBoojin Kim 	return 0;
1002a2f5203fSBoojin Kim }
1003a2f5203fSBoojin Kim 
1004a2f5203fSBoojin Kim static int pl330_runtime_resume(struct device *dev)
1005a2f5203fSBoojin Kim {
1006a2f5203fSBoojin Kim 	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
1007a2f5203fSBoojin Kim 
1008a2f5203fSBoojin Kim 	if (!pdmac) {
1009a2f5203fSBoojin Kim 		dev_err(dev, "failed to get dmac\n");
1010a2f5203fSBoojin Kim 		return -ENODEV;
1011a2f5203fSBoojin Kim 	}
1012a2f5203fSBoojin Kim 
1013a2f5203fSBoojin Kim 	clk_enable(pdmac->clk);
1014a2f5203fSBoojin Kim 
1015a2f5203fSBoojin Kim 	return 0;
1016a2f5203fSBoojin Kim }
1017a2f5203fSBoojin Kim #else
1018a2f5203fSBoojin Kim #define pl330_runtime_suspend	NULL
1019a2f5203fSBoojin Kim #define pl330_runtime_resume	NULL
1020a2f5203fSBoojin Kim #endif /* CONFIG_PM_RUNTIME */
1021a2f5203fSBoojin Kim 
1022a2f5203fSBoojin Kim static const struct dev_pm_ops pl330_pm_ops = {
1023a2f5203fSBoojin Kim 	.runtime_suspend = pl330_runtime_suspend,
1024a2f5203fSBoojin Kim 	.runtime_resume = pl330_runtime_resume,
1025a2f5203fSBoojin Kim };
1026a2f5203fSBoojin Kim 
1027b3040e40SJassi Brar static struct amba_driver pl330_driver = {
1028b3040e40SJassi Brar 	.drv = {
1029b3040e40SJassi Brar 		.owner = THIS_MODULE,
1030b3040e40SJassi Brar 		.name = "dma-pl330",
1031a2f5203fSBoojin Kim 		.pm = &pl330_pm_ops,
1032b3040e40SJassi Brar 	},
1033b3040e40SJassi Brar 	.id_table = pl330_ids,
1034b3040e40SJassi Brar 	.probe = pl330_probe,
1035b3040e40SJassi Brar 	.remove = pl330_remove,
1036b3040e40SJassi Brar };
1037b3040e40SJassi Brar 
10389e5ed094Sviresh kumar module_amba_driver(pl330_driver);
1039b3040e40SJassi Brar 
1040b3040e40SJassi Brar MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1041b3040e40SJassi Brar MODULE_DESCRIPTION("API Driver for PL330 DMAC");
1042b3040e40SJassi Brar MODULE_LICENSE("GPL");
1043