/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#include "dmaengine.h"

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
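
/*
 * Summary of the descriptor lifecycle implied by the states above: a desc
 * starts FREE in the DMAC's desc_pool, becomes PREP when plucked in a
 * prep_xxx callback, BUSY once pl330_submit_req() hands it to the PL330
 * core, and DONE when dma_pl330_rqcb() reports completion.  Non-cyclic
 * descriptors then return to the pool (FREE); cyclic ones are reset to
 * PREP and re-queued on the channel's work_list.
 */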

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of descriptors yet to be transferred */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All descs in a list belong to the same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes (completed descriptors) */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->chan.completed_cookie = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
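
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver typically passes pl330_filter to dma_request_channel() together
 * with the peripheral id it was assigned in the platform data.  The id name
 * below is hypothetical:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter,
 *				   (void *)(unsigned)DMACH_UART0_TX);
 */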

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	chan->completed_cookie = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->chan.completed_cookie = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
		return -ENXIO;
	}

	return 0;
}
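
/*
 * Client-side sketch (not driver code): the fifo address, burst size and
 * burst length consumed above are normally set before the first slave
 * transfer via the generic dmaengine_slave_config() helper, which ends up
 * in the DMA_SLAVE_CONFIG case of pl330_control().  The register address
 * below is a made-up example:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= phys_base + TX_FIFO_OFFSET,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */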

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We return the last descriptor of the circular list built in prep_xxx,
 * so the argument to submit corresponds to the last descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
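
/*
 * Putting the pieces together (illustrative only): a client's transfer goes
 * through three steps, each backed by a callback defined in this file:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						   DMA_MEM_TO_DEV, flags);
 *	cookie = dmaengine_submit(desc);	// lands in pl330_tx_submit()
 *	dma_async_issue_pending(chan);		// lands in pl330_issue_pending()
 */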

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
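
/*
 * Worked example for get_burst_len() with assumed hardware numbers: a
 * 64-bit data bus (data_bus_width = 64) and a 16-deep data buffer
 * (data_buf_dep = 16) give 64/8 * 16 = 128; with brst_size = 2 (4-byte
 * beats) that is shifted down to 32, capped at 16, and then decremented
 * until it evenly divides 'len'.
 */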
610b3040e40SJassi Brar 
61142bc9cf4SBoojin Kim static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
61242bc9cf4SBoojin Kim 		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
613db8196dfSVinod Koul 		size_t period_len, enum dma_transfer_direction direction)
61442bc9cf4SBoojin Kim {
61542bc9cf4SBoojin Kim 	struct dma_pl330_desc *desc;
61642bc9cf4SBoojin Kim 	struct dma_pl330_chan *pch = to_pchan(chan);
61742bc9cf4SBoojin Kim 	dma_addr_t dst;
61842bc9cf4SBoojin Kim 	dma_addr_t src;
61942bc9cf4SBoojin Kim 
62042bc9cf4SBoojin Kim 	desc = pl330_get_desc(pch);
62142bc9cf4SBoojin Kim 	if (!desc) {
62242bc9cf4SBoojin Kim 		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
62342bc9cf4SBoojin Kim 			__func__, __LINE__);
62442bc9cf4SBoojin Kim 		return NULL;
62542bc9cf4SBoojin Kim 	}
62642bc9cf4SBoojin Kim 
62742bc9cf4SBoojin Kim 	switch (direction) {
628db8196dfSVinod Koul 	case DMA_MEM_TO_DEV:
62942bc9cf4SBoojin Kim 		desc->rqcfg.src_inc = 1;
63042bc9cf4SBoojin Kim 		desc->rqcfg.dst_inc = 0;
631cd072515SThomas Abraham 		desc->req.rqtype = MEMTODEV;
63242bc9cf4SBoojin Kim 		src = dma_addr;
63342bc9cf4SBoojin Kim 		dst = pch->fifo_addr;
63442bc9cf4SBoojin Kim 		break;
635db8196dfSVinod Koul 	case DMA_DEV_TO_MEM:
63642bc9cf4SBoojin Kim 		desc->rqcfg.src_inc = 0;
63742bc9cf4SBoojin Kim 		desc->rqcfg.dst_inc = 1;
638cd072515SThomas Abraham 		desc->req.rqtype = DEVTOMEM;
63942bc9cf4SBoojin Kim 		src = pch->fifo_addr;
64042bc9cf4SBoojin Kim 		dst = dma_addr;
64142bc9cf4SBoojin Kim 		break;
64242bc9cf4SBoojin Kim 	default:
64342bc9cf4SBoojin Kim 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
64442bc9cf4SBoojin Kim 		__func__, __LINE__);
64542bc9cf4SBoojin Kim 		return NULL;
64642bc9cf4SBoojin Kim 	}
64742bc9cf4SBoojin Kim 
64842bc9cf4SBoojin Kim 	desc->rqcfg.brst_size = pch->burst_sz;
64942bc9cf4SBoojin Kim 	desc->rqcfg.brst_len = 1;
65042bc9cf4SBoojin Kim 
65142bc9cf4SBoojin Kim 	pch->cyclic = true;
65242bc9cf4SBoojin Kim 
65342bc9cf4SBoojin Kim 	fill_px(&desc->px, dst, src, period_len);
65442bc9cf4SBoojin Kim 
65542bc9cf4SBoojin Kim 	return &desc->txd;
65642bc9cf4SBoojin Kim }
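
/*
 * Cyclic use sketch (illustrative, e.g. an audio ring buffer; the buffer
 * layout below is an assumption):
 *
 *	// four periods, memory -> device
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma,
 *						     4 * period, period,
 *						     DMA_MEM_TO_DEV);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that this implementation programs one period per descriptor and
 * re-queues it from handle_cyclic_desc_list() after each completion, so
 * the callback fires once per period.
 */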

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
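
/*
 * Example of the burst selection above (assumed numbers): with a 32-bit
 * data bus the starting burst is 4 bytes; for len = 1022 the 4-byte burst
 * is rejected (1022 % 4 != 0) and 2 bytes is chosen, which maps to
 * brst_size = 1, i.e. 2-byte beats, with brst_len then picked by
 * get_burst_len().
 */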

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err3;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err4;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err5;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err5:
	pl330_del(pi);
probe_err4:
	free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
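
/*
 * Board-file sketch (hypothetical names and values): when not using DT, the
 * platform data consumed by pl330_probe() above carries the peripheral id
 * table, the capability mask and, optionally, a microcode buffer size:
 *
 *	static u8 pdma0_peri[] = { DMACH_UART0_RX, DMACH_UART0_TX, ... };
 *
 *	static struct dma_pl330_platdata pdma0_pdata = {
 *		.nr_valid_peri	= ARRAY_SIZE(pdma0_peri),
 *		.peri_id	= pdma0_peri,
 *	};
 *
 * with DMA_SLAVE/DMA_CYCLIC set in pdma0_pdata.cap_mask before the AMBA
 * device is registered.
 */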

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
	return;
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");