xref: /openbmc/linux/drivers/dma/pl330.c (revision 1b9bb715)
1b3040e40SJassi Brar /* linux/drivers/dma/pl330.c
2b3040e40SJassi Brar  *
3b3040e40SJassi Brar  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4b3040e40SJassi Brar  *	Jaswinder Singh <jassi.brar@samsung.com>
5b3040e40SJassi Brar  *
6b3040e40SJassi Brar  * This program is free software; you can redistribute it and/or modify
7b3040e40SJassi Brar  * it under the terms of the GNU General Public License as published by
8b3040e40SJassi Brar  * the Free Software Foundation; either version 2 of the License, or
9b3040e40SJassi Brar  * (at your option) any later version.
10b3040e40SJassi Brar  */
11b3040e40SJassi Brar 
12b3040e40SJassi Brar #include <linux/io.h>
13b3040e40SJassi Brar #include <linux/init.h>
14b3040e40SJassi Brar #include <linux/slab.h>
15b3040e40SJassi Brar #include <linux/module.h>
16b3040e40SJassi Brar #include <linux/dmaengine.h>
17b3040e40SJassi Brar #include <linux/interrupt.h>
18b3040e40SJassi Brar #include <linux/amba/bus.h>
19b3040e40SJassi Brar #include <linux/amba/pl330.h>
20a2f5203fSBoojin Kim #include <linux/pm_runtime.h>
211b9bb715SBoojin Kim #include <linux/scatterlist.h>
22b3040e40SJassi Brar 
23b3040e40SJassi Brar #define NR_DEFAULT_DESC	16
24b3040e40SJassi Brar 
/* Lifecycle of a descriptor: FREE -> PREP -> BUSY -> DONE -> FREE */
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
45b3040e40SJassi Brar 
/* Per-channel state tying a dmaengine channel to a PL330 hardware thread. */
struct dma_pl330_chan {
	/* Schedule desc completion (runs pl330_tasklet) */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie; advanced in pl330_tasklet */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation (work_list, completed) */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	dma_addr_t fifo_addr; /* used as the device end by prep_slave_sg */
};
78b3040e40SJassi Brar 
/* One instance per PL330 DMAC; owns the descriptor pool and its channels. */
struct dma_pl330_dmac {
	/* Info/handle exchanged with the PL330 core driver */
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	/* Operation clock, acquired in probe ("dma") */
	struct clk *clk;
};
95b3040e40SJassi Brar 
/*
 * A transfer descriptor: carries both the dmaengine-facing txd and the
 * PL330-core-facing request/xfer, wired together by _init_desc().
 */
struct dma_pl330_desc {
	/* To attach to a queue (desc_pool or a channel work_list) as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	/* Request configuration handed to the PL330 core */
	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	/* Current position in the FREE/PREP/BUSY/DONE lifecycle */
	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
114b3040e40SJassi Brar 
115b3040e40SJassi Brar static inline struct dma_pl330_chan *
116b3040e40SJassi Brar to_pchan(struct dma_chan *ch)
117b3040e40SJassi Brar {
118b3040e40SJassi Brar 	if (!ch)
119b3040e40SJassi Brar 		return NULL;
120b3040e40SJassi Brar 
121b3040e40SJassi Brar 	return container_of(ch, struct dma_pl330_chan, chan);
122b3040e40SJassi Brar }
123b3040e40SJassi Brar 
124b3040e40SJassi Brar static inline struct dma_pl330_desc *
125b3040e40SJassi Brar to_desc(struct dma_async_tx_descriptor *tx)
126b3040e40SJassi Brar {
127b3040e40SJassi Brar 	return container_of(tx, struct dma_pl330_desc, txd);
128b3040e40SJassi Brar }
129b3040e40SJassi Brar 
/*
 * Run completion callbacks for every descriptor on @list, then return
 * them all to the owning DMAC's pool in one splice.
 *
 * Called without any lock held, so client callbacks may themselves
 * prepare/submit new work. All descriptors on the list must belong to
 * the same channel; 'pch' deliberately retains the value from the last
 * loop iteration (the list is guaranteed non-empty at that point).
 */
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		/* Callbacks run before the desc returns to the pool; the
		 * desc is already off the work_list, so this is safe. */
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
162b3040e40SJassi Brar 
163b3040e40SJassi Brar static inline void fill_queue(struct dma_pl330_chan *pch)
164b3040e40SJassi Brar {
165b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
166b3040e40SJassi Brar 	int ret;
167b3040e40SJassi Brar 
168b3040e40SJassi Brar 	list_for_each_entry(desc, &pch->work_list, node) {
169b3040e40SJassi Brar 
170b3040e40SJassi Brar 		/* If already submitted */
171b3040e40SJassi Brar 		if (desc->status == BUSY)
172b3040e40SJassi Brar 			break;
173b3040e40SJassi Brar 
174b3040e40SJassi Brar 		ret = pl330_submit_req(pch->pl330_chid,
175b3040e40SJassi Brar 						&desc->req);
176b3040e40SJassi Brar 		if (!ret) {
177b3040e40SJassi Brar 			desc->status = BUSY;
178b3040e40SJassi Brar 			break;
179b3040e40SJassi Brar 		} else if (ret == -EAGAIN) {
180b3040e40SJassi Brar 			/* QFull or DMAC Dying */
181b3040e40SJassi Brar 			break;
182b3040e40SJassi Brar 		} else {
183b3040e40SJassi Brar 			/* Unacceptable request */
184b3040e40SJassi Brar 			desc->status = DONE;
185b3040e40SJassi Brar 			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
186b3040e40SJassi Brar 					__func__, __LINE__, desc->txd.cookie);
187b3040e40SJassi Brar 			tasklet_schedule(&pch->task);
188b3040e40SJassi Brar 		}
189b3040e40SJassi Brar 	}
190b3040e40SJassi Brar }
191b3040e40SJassi Brar 
192b3040e40SJassi Brar static void pl330_tasklet(unsigned long data)
193b3040e40SJassi Brar {
194b3040e40SJassi Brar 	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
195b3040e40SJassi Brar 	struct dma_pl330_desc *desc, *_dt;
196b3040e40SJassi Brar 	unsigned long flags;
197b3040e40SJassi Brar 	LIST_HEAD(list);
198b3040e40SJassi Brar 
199b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
200b3040e40SJassi Brar 
201b3040e40SJassi Brar 	/* Pick up ripe tomatoes */
202b3040e40SJassi Brar 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
203b3040e40SJassi Brar 		if (desc->status == DONE) {
204b3040e40SJassi Brar 			pch->completed = desc->txd.cookie;
205b3040e40SJassi Brar 			list_move_tail(&desc->node, &list);
206b3040e40SJassi Brar 		}
207b3040e40SJassi Brar 
208b3040e40SJassi Brar 	/* Try to submit a req imm. next to the last completed cookie */
209b3040e40SJassi Brar 	fill_queue(pch);
210b3040e40SJassi Brar 
211b3040e40SJassi Brar 	/* Make sure the PL330 Channel thread is active */
212b3040e40SJassi Brar 	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
213b3040e40SJassi Brar 
214b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
215b3040e40SJassi Brar 
216b3040e40SJassi Brar 	free_desc_list(&list);
217b3040e40SJassi Brar }
218b3040e40SJassi Brar 
219b3040e40SJassi Brar static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
220b3040e40SJassi Brar {
221b3040e40SJassi Brar 	struct dma_pl330_desc *desc = token;
222b3040e40SJassi Brar 	struct dma_pl330_chan *pch = desc->pchan;
223b3040e40SJassi Brar 	unsigned long flags;
224b3040e40SJassi Brar 
225b3040e40SJassi Brar 	/* If desc aborted */
226b3040e40SJassi Brar 	if (!pch)
227b3040e40SJassi Brar 		return;
228b3040e40SJassi Brar 
229b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
230b3040e40SJassi Brar 
231b3040e40SJassi Brar 	desc->status = DONE;
232b3040e40SJassi Brar 
233b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
234b3040e40SJassi Brar 
235b3040e40SJassi Brar 	tasklet_schedule(&pch->task);
236b3040e40SJassi Brar }
237b3040e40SJassi Brar 
238b3040e40SJassi Brar static int pl330_alloc_chan_resources(struct dma_chan *chan)
239b3040e40SJassi Brar {
240b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
241b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = pch->dmac;
242b3040e40SJassi Brar 	unsigned long flags;
243b3040e40SJassi Brar 
244b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
245b3040e40SJassi Brar 
246b3040e40SJassi Brar 	pch->completed = chan->cookie = 1;
247b3040e40SJassi Brar 
248b3040e40SJassi Brar 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
249b3040e40SJassi Brar 	if (!pch->pl330_chid) {
250b3040e40SJassi Brar 		spin_unlock_irqrestore(&pch->lock, flags);
251b3040e40SJassi Brar 		return 0;
252b3040e40SJassi Brar 	}
253b3040e40SJassi Brar 
254b3040e40SJassi Brar 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
255b3040e40SJassi Brar 
256b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
257b3040e40SJassi Brar 
258b3040e40SJassi Brar 	return 1;
259b3040e40SJassi Brar }
260b3040e40SJassi Brar 
261b3040e40SJassi Brar static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
262b3040e40SJassi Brar {
263b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
264b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
265b3040e40SJassi Brar 	unsigned long flags;
266b3040e40SJassi Brar 
267b3040e40SJassi Brar 	/* Only supports DMA_TERMINATE_ALL */
268b3040e40SJassi Brar 	if (cmd != DMA_TERMINATE_ALL)
269b3040e40SJassi Brar 		return -ENXIO;
270b3040e40SJassi Brar 
271b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
272b3040e40SJassi Brar 
273b3040e40SJassi Brar 	/* FLUSH the PL330 Channel thread */
274b3040e40SJassi Brar 	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
275b3040e40SJassi Brar 
276b3040e40SJassi Brar 	/* Mark all desc done */
277b3040e40SJassi Brar 	list_for_each_entry(desc, &pch->work_list, node)
278b3040e40SJassi Brar 		desc->status = DONE;
279b3040e40SJassi Brar 
280b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
281b3040e40SJassi Brar 
282b3040e40SJassi Brar 	pl330_tasklet((unsigned long) pch);
283b3040e40SJassi Brar 
284b3040e40SJassi Brar 	return 0;
285b3040e40SJassi Brar }
286b3040e40SJassi Brar 
287b3040e40SJassi Brar static void pl330_free_chan_resources(struct dma_chan *chan)
288b3040e40SJassi Brar {
289b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
290b3040e40SJassi Brar 	unsigned long flags;
291b3040e40SJassi Brar 
292b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
293b3040e40SJassi Brar 
294b3040e40SJassi Brar 	tasklet_kill(&pch->task);
295b3040e40SJassi Brar 
296b3040e40SJassi Brar 	pl330_release_channel(pch->pl330_chid);
297b3040e40SJassi Brar 	pch->pl330_chid = NULL;
298b3040e40SJassi Brar 
299b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
300b3040e40SJassi Brar }
301b3040e40SJassi Brar 
302b3040e40SJassi Brar static enum dma_status
303b3040e40SJassi Brar pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
304b3040e40SJassi Brar 		 struct dma_tx_state *txstate)
305b3040e40SJassi Brar {
306b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
307b3040e40SJassi Brar 	dma_cookie_t last_done, last_used;
308b3040e40SJassi Brar 	int ret;
309b3040e40SJassi Brar 
310b3040e40SJassi Brar 	last_done = pch->completed;
311b3040e40SJassi Brar 	last_used = chan->cookie;
312b3040e40SJassi Brar 
313b3040e40SJassi Brar 	ret = dma_async_is_complete(cookie, last_done, last_used);
314b3040e40SJassi Brar 
315b3040e40SJassi Brar 	dma_set_tx_state(txstate, last_done, last_used, 0);
316b3040e40SJassi Brar 
317b3040e40SJassi Brar 	return ret;
318b3040e40SJassi Brar }
319b3040e40SJassi Brar 
/* dmaengine .device_issue_pending: run the tasklet body in caller context. */
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_tasklet((unsigned long)pch);
}
324b3040e40SJassi Brar 
325b3040e40SJassi Brar /*
326b3040e40SJassi Brar  * We returned the last one of the circular list of descriptor(s)
327b3040e40SJassi Brar  * from prep_xxx, so the argument to submit corresponds to the last
328b3040e40SJassi Brar  * descriptor of the list.
329b3040e40SJassi Brar  */
330b3040e40SJassi Brar static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
331b3040e40SJassi Brar {
332b3040e40SJassi Brar 	struct dma_pl330_desc *desc, *last = to_desc(tx);
333b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(tx->chan);
334b3040e40SJassi Brar 	dma_cookie_t cookie;
335b3040e40SJassi Brar 	unsigned long flags;
336b3040e40SJassi Brar 
337b3040e40SJassi Brar 	spin_lock_irqsave(&pch->lock, flags);
338b3040e40SJassi Brar 
339b3040e40SJassi Brar 	/* Assign cookies to all nodes */
340b3040e40SJassi Brar 	cookie = tx->chan->cookie;
341b3040e40SJassi Brar 
342b3040e40SJassi Brar 	while (!list_empty(&last->node)) {
343b3040e40SJassi Brar 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
344b3040e40SJassi Brar 
345b3040e40SJassi Brar 		if (++cookie < 0)
346b3040e40SJassi Brar 			cookie = 1;
347b3040e40SJassi Brar 		desc->txd.cookie = cookie;
348b3040e40SJassi Brar 
349b3040e40SJassi Brar 		list_move_tail(&desc->node, &pch->work_list);
350b3040e40SJassi Brar 	}
351b3040e40SJassi Brar 
352b3040e40SJassi Brar 	if (++cookie < 0)
353b3040e40SJassi Brar 		cookie = 1;
354b3040e40SJassi Brar 	last->txd.cookie = cookie;
355b3040e40SJassi Brar 
356b3040e40SJassi Brar 	list_add_tail(&last->node, &pch->work_list);
357b3040e40SJassi Brar 
358b3040e40SJassi Brar 	tx->chan->cookie = cookie;
359b3040e40SJassi Brar 
360b3040e40SJassi Brar 	spin_unlock_irqrestore(&pch->lock, flags);
361b3040e40SJassi Brar 
362b3040e40SJassi Brar 	return cookie;
363b3040e40SJassi Brar }
364b3040e40SJassi Brar 
365b3040e40SJassi Brar static inline void _init_desc(struct dma_pl330_desc *desc)
366b3040e40SJassi Brar {
367b3040e40SJassi Brar 	desc->pchan = NULL;
368b3040e40SJassi Brar 	desc->req.x = &desc->px;
369b3040e40SJassi Brar 	desc->req.token = desc;
370b3040e40SJassi Brar 	desc->rqcfg.swap = SWAP_NO;
371b3040e40SJassi Brar 	desc->rqcfg.privileged = 0;
372b3040e40SJassi Brar 	desc->rqcfg.insnaccess = 0;
373b3040e40SJassi Brar 	desc->rqcfg.scctl = SCCTRL0;
374b3040e40SJassi Brar 	desc->rqcfg.dcctl = DCCTRL0;
375b3040e40SJassi Brar 	desc->req.cfg = &desc->rqcfg;
376b3040e40SJassi Brar 	desc->req.xfer_cb = dma_pl330_rqcb;
377b3040e40SJassi Brar 	desc->txd.tx_submit = pl330_tx_submit;
378b3040e40SJassi Brar 
379b3040e40SJassi Brar 	INIT_LIST_HEAD(&desc->node);
380b3040e40SJassi Brar }
381b3040e40SJassi Brar 
382b3040e40SJassi Brar /* Returns the number of descriptors added to the DMAC pool */
383b3040e40SJassi Brar int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
384b3040e40SJassi Brar {
385b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
386b3040e40SJassi Brar 	unsigned long flags;
387b3040e40SJassi Brar 	int i;
388b3040e40SJassi Brar 
389b3040e40SJassi Brar 	if (!pdmac)
390b3040e40SJassi Brar 		return 0;
391b3040e40SJassi Brar 
392b3040e40SJassi Brar 	desc = kmalloc(count * sizeof(*desc), flg);
393b3040e40SJassi Brar 	if (!desc)
394b3040e40SJassi Brar 		return 0;
395b3040e40SJassi Brar 
396b3040e40SJassi Brar 	spin_lock_irqsave(&pdmac->pool_lock, flags);
397b3040e40SJassi Brar 
398b3040e40SJassi Brar 	for (i = 0; i < count; i++) {
399b3040e40SJassi Brar 		_init_desc(&desc[i]);
400b3040e40SJassi Brar 		list_add_tail(&desc[i].node, &pdmac->desc_pool);
401b3040e40SJassi Brar 	}
402b3040e40SJassi Brar 
403b3040e40SJassi Brar 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
404b3040e40SJassi Brar 
405b3040e40SJassi Brar 	return count;
406b3040e40SJassi Brar }
407b3040e40SJassi Brar 
408b3040e40SJassi Brar static struct dma_pl330_desc *
409b3040e40SJassi Brar pluck_desc(struct dma_pl330_dmac *pdmac)
410b3040e40SJassi Brar {
411b3040e40SJassi Brar 	struct dma_pl330_desc *desc = NULL;
412b3040e40SJassi Brar 	unsigned long flags;
413b3040e40SJassi Brar 
414b3040e40SJassi Brar 	if (!pdmac)
415b3040e40SJassi Brar 		return NULL;
416b3040e40SJassi Brar 
417b3040e40SJassi Brar 	spin_lock_irqsave(&pdmac->pool_lock, flags);
418b3040e40SJassi Brar 
419b3040e40SJassi Brar 	if (!list_empty(&pdmac->desc_pool)) {
420b3040e40SJassi Brar 		desc = list_entry(pdmac->desc_pool.next,
421b3040e40SJassi Brar 				struct dma_pl330_desc, node);
422b3040e40SJassi Brar 
423b3040e40SJassi Brar 		list_del_init(&desc->node);
424b3040e40SJassi Brar 
425b3040e40SJassi Brar 		desc->status = PREP;
426b3040e40SJassi Brar 		desc->txd.callback = NULL;
427b3040e40SJassi Brar 	}
428b3040e40SJassi Brar 
429b3040e40SJassi Brar 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
430b3040e40SJassi Brar 
431b3040e40SJassi Brar 	return desc;
432b3040e40SJassi Brar }
433b3040e40SJassi Brar 
434b3040e40SJassi Brar static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
435b3040e40SJassi Brar {
436b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = pch->dmac;
437b3040e40SJassi Brar 	struct dma_pl330_peri *peri = pch->chan.private;
438b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
439b3040e40SJassi Brar 
440b3040e40SJassi Brar 	/* Pluck one desc from the pool of DMAC */
441b3040e40SJassi Brar 	desc = pluck_desc(pdmac);
442b3040e40SJassi Brar 
443b3040e40SJassi Brar 	/* If the DMAC pool is empty, alloc new */
444b3040e40SJassi Brar 	if (!desc) {
445b3040e40SJassi Brar 		if (!add_desc(pdmac, GFP_ATOMIC, 1))
446b3040e40SJassi Brar 			return NULL;
447b3040e40SJassi Brar 
448b3040e40SJassi Brar 		/* Try again */
449b3040e40SJassi Brar 		desc = pluck_desc(pdmac);
450b3040e40SJassi Brar 		if (!desc) {
451b3040e40SJassi Brar 			dev_err(pch->dmac->pif.dev,
452b3040e40SJassi Brar 				"%s:%d ALERT!\n", __func__, __LINE__);
453b3040e40SJassi Brar 			return NULL;
454b3040e40SJassi Brar 		}
455b3040e40SJassi Brar 	}
456b3040e40SJassi Brar 
457b3040e40SJassi Brar 	/* Initialize the descriptor */
458b3040e40SJassi Brar 	desc->pchan = pch;
459b3040e40SJassi Brar 	desc->txd.cookie = 0;
460b3040e40SJassi Brar 	async_tx_ack(&desc->txd);
461b3040e40SJassi Brar 
4624e0e6109SRob Herring 	if (peri) {
463b3040e40SJassi Brar 		desc->req.rqtype = peri->rqtype;
4641b9bb715SBoojin Kim 		desc->req.peri = pch->chan.chan_id;
4654e0e6109SRob Herring 	} else {
4664e0e6109SRob Herring 		desc->req.rqtype = MEMTOMEM;
4674e0e6109SRob Herring 		desc->req.peri = 0;
4684e0e6109SRob Herring 	}
469b3040e40SJassi Brar 
470b3040e40SJassi Brar 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
471b3040e40SJassi Brar 
472b3040e40SJassi Brar 	return desc;
473b3040e40SJassi Brar }
474b3040e40SJassi Brar 
475b3040e40SJassi Brar static inline void fill_px(struct pl330_xfer *px,
476b3040e40SJassi Brar 		dma_addr_t dst, dma_addr_t src, size_t len)
477b3040e40SJassi Brar {
478b3040e40SJassi Brar 	px->next = NULL;
479b3040e40SJassi Brar 	px->bytes = len;
480b3040e40SJassi Brar 	px->dst_addr = dst;
481b3040e40SJassi Brar 	px->src_addr = src;
482b3040e40SJassi Brar }
483b3040e40SJassi Brar 
484b3040e40SJassi Brar static struct dma_pl330_desc *
485b3040e40SJassi Brar __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
486b3040e40SJassi Brar 		dma_addr_t src, size_t len)
487b3040e40SJassi Brar {
488b3040e40SJassi Brar 	struct dma_pl330_desc *desc = pl330_get_desc(pch);
489b3040e40SJassi Brar 
490b3040e40SJassi Brar 	if (!desc) {
491b3040e40SJassi Brar 		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
492b3040e40SJassi Brar 			__func__, __LINE__);
493b3040e40SJassi Brar 		return NULL;
494b3040e40SJassi Brar 	}
495b3040e40SJassi Brar 
496b3040e40SJassi Brar 	/*
497b3040e40SJassi Brar 	 * Ideally we should lookout for reqs bigger than
498b3040e40SJassi Brar 	 * those that can be programmed with 256 bytes of
499b3040e40SJassi Brar 	 * MC buffer, but considering a req size is seldom
500b3040e40SJassi Brar 	 * going to be word-unaligned and more than 200MB,
501b3040e40SJassi Brar 	 * we take it easy.
502b3040e40SJassi Brar 	 * Also, should the limit is reached we'd rather
503b3040e40SJassi Brar 	 * have the platform increase MC buffer size than
504b3040e40SJassi Brar 	 * complicating this API driver.
505b3040e40SJassi Brar 	 */
506b3040e40SJassi Brar 	fill_px(&desc->px, dst, src, len);
507b3040e40SJassi Brar 
508b3040e40SJassi Brar 	return desc;
509b3040e40SJassi Brar }
510b3040e40SJassi Brar 
511b3040e40SJassi Brar /* Call after fixing burst size */
512b3040e40SJassi Brar static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
513b3040e40SJassi Brar {
514b3040e40SJassi Brar 	struct dma_pl330_chan *pch = desc->pchan;
515b3040e40SJassi Brar 	struct pl330_info *pi = &pch->dmac->pif;
516b3040e40SJassi Brar 	int burst_len;
517b3040e40SJassi Brar 
518b3040e40SJassi Brar 	burst_len = pi->pcfg.data_bus_width / 8;
519b3040e40SJassi Brar 	burst_len *= pi->pcfg.data_buf_dep;
520b3040e40SJassi Brar 	burst_len >>= desc->rqcfg.brst_size;
521b3040e40SJassi Brar 
522b3040e40SJassi Brar 	/* src/dst_burst_len can't be more than 16 */
523b3040e40SJassi Brar 	if (burst_len > 16)
524b3040e40SJassi Brar 		burst_len = 16;
525b3040e40SJassi Brar 
526b3040e40SJassi Brar 	while (burst_len > 1) {
527b3040e40SJassi Brar 		if (!(len % (burst_len << desc->rqcfg.brst_size)))
528b3040e40SJassi Brar 			break;
529b3040e40SJassi Brar 		burst_len--;
530b3040e40SJassi Brar 	}
531b3040e40SJassi Brar 
532b3040e40SJassi Brar 	return burst_len;
533b3040e40SJassi Brar }
534b3040e40SJassi Brar 
535b3040e40SJassi Brar static struct dma_async_tx_descriptor *
536b3040e40SJassi Brar pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
537b3040e40SJassi Brar 		dma_addr_t src, size_t len, unsigned long flags)
538b3040e40SJassi Brar {
539b3040e40SJassi Brar 	struct dma_pl330_desc *desc;
540b3040e40SJassi Brar 	struct dma_pl330_chan *pch = to_pchan(chan);
541b3040e40SJassi Brar 	struct dma_pl330_peri *peri = chan->private;
542b3040e40SJassi Brar 	struct pl330_info *pi;
543b3040e40SJassi Brar 	int burst;
544b3040e40SJassi Brar 
5454e0e6109SRob Herring 	if (unlikely(!pch || !len))
546b3040e40SJassi Brar 		return NULL;
547b3040e40SJassi Brar 
5484e0e6109SRob Herring 	if (peri && peri->rqtype != MEMTOMEM)
549b3040e40SJassi Brar 		return NULL;
550b3040e40SJassi Brar 
551b3040e40SJassi Brar 	pi = &pch->dmac->pif;
552b3040e40SJassi Brar 
553b3040e40SJassi Brar 	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
554b3040e40SJassi Brar 	if (!desc)
555b3040e40SJassi Brar 		return NULL;
556b3040e40SJassi Brar 
557b3040e40SJassi Brar 	desc->rqcfg.src_inc = 1;
558b3040e40SJassi Brar 	desc->rqcfg.dst_inc = 1;
559b3040e40SJassi Brar 
560b3040e40SJassi Brar 	/* Select max possible burst size */
561b3040e40SJassi Brar 	burst = pi->pcfg.data_bus_width / 8;
562b3040e40SJassi Brar 
563b3040e40SJassi Brar 	while (burst > 1) {
564b3040e40SJassi Brar 		if (!(len % burst))
565b3040e40SJassi Brar 			break;
566b3040e40SJassi Brar 		burst /= 2;
567b3040e40SJassi Brar 	}
568b3040e40SJassi Brar 
569b3040e40SJassi Brar 	desc->rqcfg.brst_size = 0;
570b3040e40SJassi Brar 	while (burst != (1 << desc->rqcfg.brst_size))
571b3040e40SJassi Brar 		desc->rqcfg.brst_size++;
572b3040e40SJassi Brar 
573b3040e40SJassi Brar 	desc->rqcfg.brst_len = get_burst_len(desc, len);
574b3040e40SJassi Brar 
575b3040e40SJassi Brar 	desc->txd.flags = flags;
576b3040e40SJassi Brar 
577b3040e40SJassi Brar 	return &desc->txd;
578b3040e40SJassi Brar }
579b3040e40SJassi Brar 
/*
 * dmaengine .device_prep_slave_sg: build one descriptor per scatterlist
 * entry for a peripheral transfer, chained off the first descriptor via
 * its ->node list. The device side of every entry is pch->fifo_addr
 * (presumably set by slave configuration before this is called - TODO
 * confirm against the config path). Returns the LAST descriptor of the
 * chain (see pl330_tx_submit), or NULL on error; on mid-chain
 * allocation failure all already-built descriptors are returned to the
 * DMAC pool.
 */
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent with the peripheral type */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	/* Fixed device-side address for every sg entry */
	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			/* Unwind: give every desc built so far back to the pool */
			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		/* Chain subsequent descriptors off the first one */
		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		/* Memory side increments; FIFO side stays fixed */
		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		/* Burst matches the peripheral FIFO width, one beat at a time */
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
662b3040e40SJassi Brar 
663b3040e40SJassi Brar static irqreturn_t pl330_irq_handler(int irq, void *data)
664b3040e40SJassi Brar {
665b3040e40SJassi Brar 	if (pl330_update(data))
666b3040e40SJassi Brar 		return IRQ_HANDLED;
667b3040e40SJassi Brar 	else
668b3040e40SJassi Brar 		return IRQ_NONE;
669b3040e40SJassi Brar }
670b3040e40SJassi Brar 
671b3040e40SJassi Brar static int __devinit
672aa25afadSRussell King pl330_probe(struct amba_device *adev, const struct amba_id *id)
673b3040e40SJassi Brar {
674b3040e40SJassi Brar 	struct dma_pl330_platdata *pdat;
675b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac;
676b3040e40SJassi Brar 	struct dma_pl330_chan *pch;
677b3040e40SJassi Brar 	struct pl330_info *pi;
678b3040e40SJassi Brar 	struct dma_device *pd;
679b3040e40SJassi Brar 	struct resource *res;
680b3040e40SJassi Brar 	int i, ret, irq;
6814e0e6109SRob Herring 	int num_chan;
682b3040e40SJassi Brar 
683b3040e40SJassi Brar 	pdat = adev->dev.platform_data;
684b3040e40SJassi Brar 
685b3040e40SJassi Brar 	/* Allocate a new DMAC and its Channels */
6864e0e6109SRob Herring 	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
687b3040e40SJassi Brar 	if (!pdmac) {
688b3040e40SJassi Brar 		dev_err(&adev->dev, "unable to allocate mem\n");
689b3040e40SJassi Brar 		return -ENOMEM;
690b3040e40SJassi Brar 	}
691b3040e40SJassi Brar 
692b3040e40SJassi Brar 	pi = &pdmac->pif;
693b3040e40SJassi Brar 	pi->dev = &adev->dev;
694b3040e40SJassi Brar 	pi->pl330_data = NULL;
6954e0e6109SRob Herring 	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
696b3040e40SJassi Brar 
697b3040e40SJassi Brar 	res = &adev->res;
698b3040e40SJassi Brar 	request_mem_region(res->start, resource_size(res), "dma-pl330");
699b3040e40SJassi Brar 
700b3040e40SJassi Brar 	pi->base = ioremap(res->start, resource_size(res));
701b3040e40SJassi Brar 	if (!pi->base) {
702b3040e40SJassi Brar 		ret = -ENXIO;
703b3040e40SJassi Brar 		goto probe_err1;
704b3040e40SJassi Brar 	}
705b3040e40SJassi Brar 
706a2f5203fSBoojin Kim 	pdmac->clk = clk_get(&adev->dev, "dma");
707a2f5203fSBoojin Kim 	if (IS_ERR(pdmac->clk)) {
708a2f5203fSBoojin Kim 		dev_err(&adev->dev, "Cannot get operation clock.\n");
709a2f5203fSBoojin Kim 		ret = -EINVAL;
710a2f5203fSBoojin Kim 		goto probe_err1;
711a2f5203fSBoojin Kim 	}
712a2f5203fSBoojin Kim 
713a2f5203fSBoojin Kim 	amba_set_drvdata(adev, pdmac);
714a2f5203fSBoojin Kim 
715a2f5203fSBoojin Kim #ifdef CONFIG_PM_RUNTIME
716a2f5203fSBoojin Kim 	/* to use the runtime PM helper functions */
717a2f5203fSBoojin Kim 	pm_runtime_enable(&adev->dev);
718a2f5203fSBoojin Kim 
719a2f5203fSBoojin Kim 	/* enable the power domain */
720a2f5203fSBoojin Kim 	if (pm_runtime_get_sync(&adev->dev)) {
721a2f5203fSBoojin Kim 		dev_err(&adev->dev, "failed to get runtime pm\n");
722a2f5203fSBoojin Kim 		ret = -ENODEV;
723a2f5203fSBoojin Kim 		goto probe_err1;
724a2f5203fSBoojin Kim 	}
725a2f5203fSBoojin Kim #else
726a2f5203fSBoojin Kim 	/* enable dma clk */
727a2f5203fSBoojin Kim 	clk_enable(pdmac->clk);
728a2f5203fSBoojin Kim #endif
729a2f5203fSBoojin Kim 
730b3040e40SJassi Brar 	irq = adev->irq[0];
731b3040e40SJassi Brar 	ret = request_irq(irq, pl330_irq_handler, 0,
732b3040e40SJassi Brar 			dev_name(&adev->dev), pi);
733b3040e40SJassi Brar 	if (ret)
734b3040e40SJassi Brar 		goto probe_err2;
735b3040e40SJassi Brar 
736b3040e40SJassi Brar 	ret = pl330_add(pi);
737b3040e40SJassi Brar 	if (ret)
738b3040e40SJassi Brar 		goto probe_err3;
739b3040e40SJassi Brar 
740b3040e40SJassi Brar 	INIT_LIST_HEAD(&pdmac->desc_pool);
741b3040e40SJassi Brar 	spin_lock_init(&pdmac->pool_lock);
742b3040e40SJassi Brar 
743b3040e40SJassi Brar 	/* Create a descriptor pool of default size */
744b3040e40SJassi Brar 	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
745b3040e40SJassi Brar 		dev_warn(&adev->dev, "unable to allocate desc\n");
746b3040e40SJassi Brar 
747b3040e40SJassi Brar 	pd = &pdmac->ddma;
748b3040e40SJassi Brar 	INIT_LIST_HEAD(&pd->channels);
749b3040e40SJassi Brar 
750b3040e40SJassi Brar 	/* Initialize channel parameters */
7514e0e6109SRob Herring 	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
7524e0e6109SRob Herring 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
7534e0e6109SRob Herring 
7544e0e6109SRob Herring 	for (i = 0; i < num_chan; i++) {
755b3040e40SJassi Brar 		pch = &pdmac->peripherals[i];
7564e0e6109SRob Herring 		if (pdat) {
7574e0e6109SRob Herring 			struct dma_pl330_peri *peri = &pdat->peri[i];
758b3040e40SJassi Brar 
759b3040e40SJassi Brar 			switch (peri->rqtype) {
760b3040e40SJassi Brar 			case MEMTOMEM:
761b3040e40SJassi Brar 				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
762b3040e40SJassi Brar 				break;
763b3040e40SJassi Brar 			case MEMTODEV:
764b3040e40SJassi Brar 			case DEVTOMEM:
765b3040e40SJassi Brar 				dma_cap_set(DMA_SLAVE, pd->cap_mask);
766b3040e40SJassi Brar 				break;
767b3040e40SJassi Brar 			default:
768b3040e40SJassi Brar 				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
769b3040e40SJassi Brar 				continue;
770b3040e40SJassi Brar 			}
7714e0e6109SRob Herring 			pch->chan.private = peri;
7724e0e6109SRob Herring 		} else {
7734e0e6109SRob Herring 			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
7744e0e6109SRob Herring 			pch->chan.private = NULL;
7754e0e6109SRob Herring 		}
776b3040e40SJassi Brar 
777b3040e40SJassi Brar 		INIT_LIST_HEAD(&pch->work_list);
778b3040e40SJassi Brar 		spin_lock_init(&pch->lock);
779b3040e40SJassi Brar 		pch->pl330_chid = NULL;
780b3040e40SJassi Brar 		pch->chan.device = pd;
781b3040e40SJassi Brar 		pch->chan.chan_id = i;
782b3040e40SJassi Brar 		pch->dmac = pdmac;
783b3040e40SJassi Brar 
784b3040e40SJassi Brar 		/* Add the channel to the DMAC list */
785b3040e40SJassi Brar 		pd->chancnt++;
786b3040e40SJassi Brar 		list_add_tail(&pch->chan.device_node, &pd->channels);
787b3040e40SJassi Brar 	}
788b3040e40SJassi Brar 
789b3040e40SJassi Brar 	pd->dev = &adev->dev;
790b3040e40SJassi Brar 
791b3040e40SJassi Brar 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
792b3040e40SJassi Brar 	pd->device_free_chan_resources = pl330_free_chan_resources;
793b3040e40SJassi Brar 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
794b3040e40SJassi Brar 	pd->device_tx_status = pl330_tx_status;
795b3040e40SJassi Brar 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
796b3040e40SJassi Brar 	pd->device_control = pl330_control;
797b3040e40SJassi Brar 	pd->device_issue_pending = pl330_issue_pending;
798b3040e40SJassi Brar 
799b3040e40SJassi Brar 	ret = dma_async_device_register(pd);
800b3040e40SJassi Brar 	if (ret) {
801b3040e40SJassi Brar 		dev_err(&adev->dev, "unable to register DMAC\n");
802b3040e40SJassi Brar 		goto probe_err4;
803b3040e40SJassi Brar 	}
804b3040e40SJassi Brar 
805b3040e40SJassi Brar 	dev_info(&adev->dev,
806b3040e40SJassi Brar 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
807b3040e40SJassi Brar 	dev_info(&adev->dev,
808b3040e40SJassi Brar 		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
809b3040e40SJassi Brar 		pi->pcfg.data_buf_dep,
810b3040e40SJassi Brar 		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
811b3040e40SJassi Brar 		pi->pcfg.num_peri, pi->pcfg.num_events);
812b3040e40SJassi Brar 
813b3040e40SJassi Brar 	return 0;
814b3040e40SJassi Brar 
815b3040e40SJassi Brar probe_err4:
816b3040e40SJassi Brar 	pl330_del(pi);
817b3040e40SJassi Brar probe_err3:
818b3040e40SJassi Brar 	free_irq(irq, pi);
819b3040e40SJassi Brar probe_err2:
820b3040e40SJassi Brar 	iounmap(pi->base);
821b3040e40SJassi Brar probe_err1:
822b3040e40SJassi Brar 	release_mem_region(res->start, resource_size(res));
823b3040e40SJassi Brar 	kfree(pdmac);
824b3040e40SJassi Brar 
825b3040e40SJassi Brar 	return ret;
826b3040e40SJassi Brar }
827b3040e40SJassi Brar 
828b3040e40SJassi Brar static int __devexit pl330_remove(struct amba_device *adev)
829b3040e40SJassi Brar {
830b3040e40SJassi Brar 	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
831b3040e40SJassi Brar 	struct dma_pl330_chan *pch, *_p;
832b3040e40SJassi Brar 	struct pl330_info *pi;
833b3040e40SJassi Brar 	struct resource *res;
834b3040e40SJassi Brar 	int irq;
835b3040e40SJassi Brar 
836b3040e40SJassi Brar 	if (!pdmac)
837b3040e40SJassi Brar 		return 0;
838b3040e40SJassi Brar 
839b3040e40SJassi Brar 	amba_set_drvdata(adev, NULL);
840b3040e40SJassi Brar 
841b3040e40SJassi Brar 	/* Idle the DMAC */
842b3040e40SJassi Brar 	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
843b3040e40SJassi Brar 			chan.device_node) {
844b3040e40SJassi Brar 
845b3040e40SJassi Brar 		/* Remove the channel */
846b3040e40SJassi Brar 		list_del(&pch->chan.device_node);
847b3040e40SJassi Brar 
848b3040e40SJassi Brar 		/* Flush the channel */
849b3040e40SJassi Brar 		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
850b3040e40SJassi Brar 		pl330_free_chan_resources(&pch->chan);
851b3040e40SJassi Brar 	}
852b3040e40SJassi Brar 
853b3040e40SJassi Brar 	pi = &pdmac->pif;
854b3040e40SJassi Brar 
855b3040e40SJassi Brar 	pl330_del(pi);
856b3040e40SJassi Brar 
857b3040e40SJassi Brar 	irq = adev->irq[0];
858b3040e40SJassi Brar 	free_irq(irq, pi);
859b3040e40SJassi Brar 
860b3040e40SJassi Brar 	iounmap(pi->base);
861b3040e40SJassi Brar 
862b3040e40SJassi Brar 	res = &adev->res;
863b3040e40SJassi Brar 	release_mem_region(res->start, resource_size(res));
864b3040e40SJassi Brar 
865a2f5203fSBoojin Kim #ifdef CONFIG_PM_RUNTIME
866a2f5203fSBoojin Kim 	pm_runtime_put(&adev->dev);
867a2f5203fSBoojin Kim 	pm_runtime_disable(&adev->dev);
868a2f5203fSBoojin Kim #else
869a2f5203fSBoojin Kim 	clk_disable(pdmac->clk);
870a2f5203fSBoojin Kim #endif
871a2f5203fSBoojin Kim 
872b3040e40SJassi Brar 	kfree(pdmac);
873b3040e40SJassi Brar 
874b3040e40SJassi Brar 	return 0;
875b3040e40SJassi Brar }
876b3040e40SJassi Brar 
877b3040e40SJassi Brar static struct amba_id pl330_ids[] = {
878b3040e40SJassi Brar 	{
879b3040e40SJassi Brar 		.id	= 0x00041330,
880b3040e40SJassi Brar 		.mask	= 0x000fffff,
881b3040e40SJassi Brar 	},
882b3040e40SJassi Brar 	{ 0, 0 },
883b3040e40SJassi Brar };
884b3040e40SJassi Brar 
885a2f5203fSBoojin Kim #ifdef CONFIG_PM_RUNTIME
886a2f5203fSBoojin Kim static int pl330_runtime_suspend(struct device *dev)
887a2f5203fSBoojin Kim {
888a2f5203fSBoojin Kim 	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
889a2f5203fSBoojin Kim 
890a2f5203fSBoojin Kim 	if (!pdmac) {
891a2f5203fSBoojin Kim 		dev_err(dev, "failed to get dmac\n");
892a2f5203fSBoojin Kim 		return -ENODEV;
893a2f5203fSBoojin Kim 	}
894a2f5203fSBoojin Kim 
895a2f5203fSBoojin Kim 	clk_disable(pdmac->clk);
896a2f5203fSBoojin Kim 
897a2f5203fSBoojin Kim 	return 0;
898a2f5203fSBoojin Kim }
899a2f5203fSBoojin Kim 
900a2f5203fSBoojin Kim static int pl330_runtime_resume(struct device *dev)
901a2f5203fSBoojin Kim {
902a2f5203fSBoojin Kim 	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
903a2f5203fSBoojin Kim 
904a2f5203fSBoojin Kim 	if (!pdmac) {
905a2f5203fSBoojin Kim 		dev_err(dev, "failed to get dmac\n");
906a2f5203fSBoojin Kim 		return -ENODEV;
907a2f5203fSBoojin Kim 	}
908a2f5203fSBoojin Kim 
909a2f5203fSBoojin Kim 	clk_enable(pdmac->clk);
910a2f5203fSBoojin Kim 
911a2f5203fSBoojin Kim 	return 0;
912a2f5203fSBoojin Kim }
913a2f5203fSBoojin Kim #else
914a2f5203fSBoojin Kim #define pl330_runtime_suspend	NULL
915a2f5203fSBoojin Kim #define pl330_runtime_resume	NULL
916a2f5203fSBoojin Kim #endif /* CONFIG_PM_RUNTIME */
917a2f5203fSBoojin Kim 
918a2f5203fSBoojin Kim static const struct dev_pm_ops pl330_pm_ops = {
919a2f5203fSBoojin Kim 	.runtime_suspend = pl330_runtime_suspend,
920a2f5203fSBoojin Kim 	.runtime_resume = pl330_runtime_resume,
921a2f5203fSBoojin Kim };
922a2f5203fSBoojin Kim 
923b3040e40SJassi Brar static struct amba_driver pl330_driver = {
924b3040e40SJassi Brar 	.drv = {
925b3040e40SJassi Brar 		.owner = THIS_MODULE,
926b3040e40SJassi Brar 		.name = "dma-pl330",
927a2f5203fSBoojin Kim 		.pm = &pl330_pm_ops,
928b3040e40SJassi Brar 	},
929b3040e40SJassi Brar 	.id_table = pl330_ids,
930b3040e40SJassi Brar 	.probe = pl330_probe,
931b3040e40SJassi Brar 	.remove = pl330_remove,
932b3040e40SJassi Brar };
933b3040e40SJassi Brar 
934b3040e40SJassi Brar static int __init pl330_init(void)
935b3040e40SJassi Brar {
936b3040e40SJassi Brar 	return amba_driver_register(&pl330_driver);
937b3040e40SJassi Brar }
938b3040e40SJassi Brar module_init(pl330_init);
939b3040e40SJassi Brar 
940b3040e40SJassi Brar static void __exit pl330_exit(void)
941b3040e40SJassi Brar {
942b3040e40SJassi Brar 	amba_driver_unregister(&pl330_driver);
943b3040e40SJassi Brar 	return;
944b3040e40SJassi Brar }
945b3040e40SJassi Brar module_exit(pl330_exit);
946b3040e40SJassi Brar 
947b3040e40SJassi Brar MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
948b3040e40SJassi Brar MODULE_DESCRIPTION("API Driver for PL330 DMAC");
949b3040e40SJassi Brar MODULE_LICENSE("GPL");
950