xref: /openbmc/linux/drivers/dma/pl330.c (revision 96a2af41)
/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#include "dmaengine.h"

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

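/*
 * Run the completion callback of every descriptor on @list and then
 * return the descriptors to their parent DMAC's pool.
 */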
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

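/*
 * For a cyclic channel: run the period callback for each completed
 * descriptor, mark it PREP again and put it back on the channel's
 * work_list so the tasklet resubmits it for the next period.
 */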
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

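/*
 * Try to submit the next PREP descriptor on the channel's work_list to
 * the PL330 core, which accepts at most two active requests per channel.
 * Unacceptable descriptors are marked DONE and the tasklet is scheduled
 * to clean them up.
 */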
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

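/*
 * Channel tasklet: complete the cookies of DONE descriptors, submit the
 * next pending request, keep the channel thread running, and finally
 * either recycle the finished descriptors or, for cyclic transfers,
 * requeue them.
 */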
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

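/* Callback from the PL330 core when a request (descriptor) finishes */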
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

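/*
 * Filter function for dma_request_channel(): matches a channel belonging
 * to this driver against the peripheral id (or, with DT, the phandle
 * property) supplied by the client. A slave driver would typically use
 * it roughly like this (illustrative sketch only, 'peri_id' being the
 * peripheral's request signal number):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 */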
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	chan->completed_cookie = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

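/*
 * DMA_TERMINATE_ALL flushes the channel thread and recycles all queued
 * descriptors; DMA_SLAVE_CONFIG caches the peripheral FIFO address,
 * burst size and burst length for later prep_slave_sg/cyclic calls.
 */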
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
			desc->status = DONE;
			pch->chan.completed_cookie = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

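/*
 * Take a descriptor from the DMAC pool, growing the pool by one with
 * GFP_ATOMIC if it is empty, and initialize it for the given channel.
 */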
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

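/*
 * Pick the largest burst length (capped at 16) that fits the data
 * buffer depth and keeps the transfer length a whole number of bursts.
 */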
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

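/*
 * Prepare a cyclic (e.g. audio) transfer of one period between memory
 * and the cached peripheral FIFO address; the descriptor is requeued by
 * handle_cyclic_desc_list() after every completed period.
 */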
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

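/*
 * Prepare a slave scatter-gather transfer: one descriptor per sg entry,
 * each moving data between the cached peripheral FIFO address and the
 * sg segment. The descriptors are chained and the last one is returned.
 */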
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

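/*
 * Probe: map the registers, get (and, without runtime PM, enable) the
 * clock, register the IRQ handler, add the PL330 core instance, set up
 * the descriptor pool and per-peripheral channels, and register the
 * device with the dmaengine framework.
 */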
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err3;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err4;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err5;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err5:
	pl330_del(pi);
probe_err4:
	free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

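/*
 * Remove: terminate and free any channels still set up, then tear the
 * rest down in roughly the reverse order of probe.
 */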
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

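/*
 * Runtime PM: with CONFIG_PM_RUNTIME the DMAC clock is gated whenever
 * the device is runtime-suspended; otherwise the clock stays enabled
 * from probe to remove.
 */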
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");