xref: /openbmc/linux/drivers/dma/ti/omap-dma.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2d88b1397SPeter Ujfalusi /*
3d88b1397SPeter Ujfalusi  * OMAP DMAengine support
4d88b1397SPeter Ujfalusi  */
54c74ecf7STony Lindgren #include <linux/cpu_pm.h>
6d88b1397SPeter Ujfalusi #include <linux/delay.h>
7d88b1397SPeter Ujfalusi #include <linux/dmaengine.h>
8d88b1397SPeter Ujfalusi #include <linux/dma-mapping.h>
9d88b1397SPeter Ujfalusi #include <linux/dmapool.h>
10d88b1397SPeter Ujfalusi #include <linux/err.h>
11d88b1397SPeter Ujfalusi #include <linux/init.h>
12d88b1397SPeter Ujfalusi #include <linux/interrupt.h>
13d88b1397SPeter Ujfalusi #include <linux/list.h>
14d88b1397SPeter Ujfalusi #include <linux/module.h>
15d88b1397SPeter Ujfalusi #include <linux/omap-dma.h>
16d88b1397SPeter Ujfalusi #include <linux/platform_device.h>
17d88b1397SPeter Ujfalusi #include <linux/slab.h>
18d88b1397SPeter Ujfalusi #include <linux/spinlock.h>
19*897500c7SRob Herring #include <linux/of.h>
20d88b1397SPeter Ujfalusi #include <linux/of_dma.h>
21d88b1397SPeter Ujfalusi 
22d88b1397SPeter Ujfalusi #include "../virt-dma.h"
23d88b1397SPeter Ujfalusi 
/* Number of hardware DMA request lines handled by sDMA. */
#define OMAP_SDMA_REQUESTS	127
/* Number of logical DMA channels in the sDMA controller. */
#define OMAP_SDMA_CHANNELS	32

/*
 * Per-SoC feature configuration for the sDMA controller; selects which
 * quirk/workaround paths the driver takes.
 */
struct omap_dma_config {
	int lch_end;			/* last per-channel reg cleared by omap_dma_clear_lch() */
	unsigned int rw_priority:1;	/* controller has read/write priority support */
	unsigned int needs_busy_check:1;	/* channel busy state must be checked */
	unsigned int may_lose_context:1;	/* registers may be lost across power transitions */
	unsigned int needs_lch_clear:1;	/* logical channel registers must be zeroed */
};
344c74ecf7STony Lindgren 
354c74ecf7STony Lindgren struct omap_dma_context {
364c74ecf7STony Lindgren 	u32 irqenable_l0;
374c74ecf7STony Lindgren 	u32 irqenable_l1;
384c74ecf7STony Lindgren 	u32 ocp_sysconfig;
394c74ecf7STony Lindgren 	u32 gcr;
404c74ecf7STony Lindgren };
414c74ecf7STony Lindgren 
42d88b1397SPeter Ujfalusi struct omap_dmadev {
43d88b1397SPeter Ujfalusi 	struct dma_device ddev;
44d88b1397SPeter Ujfalusi 	spinlock_t lock;
45d88b1397SPeter Ujfalusi 	void __iomem *base;
46d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *reg_map;
47d88b1397SPeter Ujfalusi 	struct omap_system_dma_plat_info *plat;
484c74ecf7STony Lindgren 	const struct omap_dma_config *cfg;
494c74ecf7STony Lindgren 	struct notifier_block nb;
504c74ecf7STony Lindgren 	struct omap_dma_context context;
514c74ecf7STony Lindgren 	int lch_count;
5261ecb539STony Lindgren 	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
5361ecb539STony Lindgren 	struct mutex lch_lock;		/* for assigning logical channels */
54d88b1397SPeter Ujfalusi 	bool legacy;
55d88b1397SPeter Ujfalusi 	bool ll123_supported;
56d88b1397SPeter Ujfalusi 	struct dma_pool *desc_pool;
57d88b1397SPeter Ujfalusi 	unsigned dma_requests;
58d88b1397SPeter Ujfalusi 	spinlock_t irq_lock;
59d88b1397SPeter Ujfalusi 	uint32_t irq_enable_mask;
60d88b1397SPeter Ujfalusi 	struct omap_chan **lch_map;
61d88b1397SPeter Ujfalusi };
62d88b1397SPeter Ujfalusi 
63d88b1397SPeter Ujfalusi struct omap_chan {
64d88b1397SPeter Ujfalusi 	struct virt_dma_chan vc;
65d88b1397SPeter Ujfalusi 	void __iomem *channel_base;
66d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *reg_map;
67d88b1397SPeter Ujfalusi 	uint32_t ccr;
68d88b1397SPeter Ujfalusi 
69d88b1397SPeter Ujfalusi 	struct dma_slave_config	cfg;
70d88b1397SPeter Ujfalusi 	unsigned dma_sig;
71d88b1397SPeter Ujfalusi 	bool cyclic;
72d88b1397SPeter Ujfalusi 	bool paused;
73d88b1397SPeter Ujfalusi 	bool running;
74d88b1397SPeter Ujfalusi 
75d88b1397SPeter Ujfalusi 	int dma_ch;
76d88b1397SPeter Ujfalusi 	struct omap_desc *desc;
77d88b1397SPeter Ujfalusi 	unsigned sgidx;
78d88b1397SPeter Ujfalusi };
79d88b1397SPeter Ujfalusi 
80d88b1397SPeter Ujfalusi #define DESC_NXT_SV_REFRESH	(0x1 << 24)
81d88b1397SPeter Ujfalusi #define DESC_NXT_SV_REUSE	(0x2 << 24)
82d88b1397SPeter Ujfalusi #define DESC_NXT_DV_REFRESH	(0x1 << 26)
83d88b1397SPeter Ujfalusi #define DESC_NXT_DV_REUSE	(0x2 << 26)
84d88b1397SPeter Ujfalusi #define DESC_NTYPE_TYPE2	(0x2 << 29)
85d88b1397SPeter Ujfalusi 
86d88b1397SPeter Ujfalusi /* Type 2 descriptor with Source or Destination address update */
87d88b1397SPeter Ujfalusi struct omap_type2_desc {
88d88b1397SPeter Ujfalusi 	uint32_t next_desc;
89d88b1397SPeter Ujfalusi 	uint32_t en;
90d88b1397SPeter Ujfalusi 	uint32_t addr; /* src or dst */
91d88b1397SPeter Ujfalusi 	uint16_t fn;
92d88b1397SPeter Ujfalusi 	uint16_t cicr;
93d88b1397SPeter Ujfalusi 	int16_t cdei;
94d88b1397SPeter Ujfalusi 	int16_t csei;
95d88b1397SPeter Ujfalusi 	int32_t cdfi;
96d88b1397SPeter Ujfalusi 	int32_t csfi;
97d88b1397SPeter Ujfalusi } __packed;
98d88b1397SPeter Ujfalusi 
99d88b1397SPeter Ujfalusi struct omap_sg {
100d88b1397SPeter Ujfalusi 	dma_addr_t addr;
101d88b1397SPeter Ujfalusi 	uint32_t en;		/* number of elements (24-bit) */
102d88b1397SPeter Ujfalusi 	uint32_t fn;		/* number of frames (16-bit) */
103d88b1397SPeter Ujfalusi 	int32_t fi;		/* for double indexing */
104d88b1397SPeter Ujfalusi 	int16_t ei;		/* for double indexing */
105d88b1397SPeter Ujfalusi 
106d88b1397SPeter Ujfalusi 	/* Linked list */
107d88b1397SPeter Ujfalusi 	struct omap_type2_desc *t2_desc;
108d88b1397SPeter Ujfalusi 	dma_addr_t t2_desc_paddr;
109d88b1397SPeter Ujfalusi };
110d88b1397SPeter Ujfalusi 
111d88b1397SPeter Ujfalusi struct omap_desc {
112d88b1397SPeter Ujfalusi 	struct virt_dma_desc vd;
113d88b1397SPeter Ujfalusi 	bool using_ll;
114d88b1397SPeter Ujfalusi 	enum dma_transfer_direction dir;
115d88b1397SPeter Ujfalusi 	dma_addr_t dev_addr;
1164689d35cSPeter Ujfalusi 	bool polled;
117d88b1397SPeter Ujfalusi 
118d88b1397SPeter Ujfalusi 	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
119d88b1397SPeter Ujfalusi 	int16_t ei;		/* for double indexing */
120d88b1397SPeter Ujfalusi 	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
121d88b1397SPeter Ujfalusi 	uint32_t ccr;		/* CCR value */
122d88b1397SPeter Ujfalusi 	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
123d88b1397SPeter Ujfalusi 	uint16_t cicr;		/* CICR value */
124d88b1397SPeter Ujfalusi 	uint32_t csdp;		/* CSDP value */
125d88b1397SPeter Ujfalusi 
126d88b1397SPeter Ujfalusi 	unsigned sglen;
1275ca3364aSGustavo A. R. Silva 	struct omap_sg sg[];
128d88b1397SPeter Ujfalusi };
129d88b1397SPeter Ujfalusi 
130d88b1397SPeter Ujfalusi enum {
131d88b1397SPeter Ujfalusi 	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
132d88b1397SPeter Ujfalusi 	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */
133d88b1397SPeter Ujfalusi 
134d88b1397SPeter Ujfalusi 	CCR_FS			= BIT(5),
135d88b1397SPeter Ujfalusi 	CCR_READ_PRIORITY	= BIT(6),
136d88b1397SPeter Ujfalusi 	CCR_ENABLE		= BIT(7),
137d88b1397SPeter Ujfalusi 	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
138d88b1397SPeter Ujfalusi 	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
139d88b1397SPeter Ujfalusi 	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
140d88b1397SPeter Ujfalusi 	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
141d88b1397SPeter Ujfalusi 	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
142d88b1397SPeter Ujfalusi 	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
143d88b1397SPeter Ujfalusi 	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
144d88b1397SPeter Ujfalusi 	CCR_SRC_AMODE_POSTINC	= 1 << 12,
145d88b1397SPeter Ujfalusi 	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
146d88b1397SPeter Ujfalusi 	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
147d88b1397SPeter Ujfalusi 	CCR_DST_AMODE_CONSTANT	= 0 << 14,
148d88b1397SPeter Ujfalusi 	CCR_DST_AMODE_POSTINC	= 1 << 14,
149d88b1397SPeter Ujfalusi 	CCR_DST_AMODE_SGLIDX	= 2 << 14,
150d88b1397SPeter Ujfalusi 	CCR_DST_AMODE_DBLIDX	= 3 << 14,
151d88b1397SPeter Ujfalusi 	CCR_CONSTANT_FILL	= BIT(16),
152d88b1397SPeter Ujfalusi 	CCR_TRANSPARENT_COPY	= BIT(17),
153d88b1397SPeter Ujfalusi 	CCR_BS			= BIT(18),
154d88b1397SPeter Ujfalusi 	CCR_SUPERVISOR		= BIT(22),
155d88b1397SPeter Ujfalusi 	CCR_PREFETCH		= BIT(23),
156d88b1397SPeter Ujfalusi 	CCR_TRIGGER_SRC		= BIT(24),
157d88b1397SPeter Ujfalusi 	CCR_BUFFERING_DISABLE	= BIT(25),
158d88b1397SPeter Ujfalusi 	CCR_WRITE_PRIORITY	= BIT(26),
159d88b1397SPeter Ujfalusi 	CCR_SYNC_ELEMENT	= 0,
160d88b1397SPeter Ujfalusi 	CCR_SYNC_FRAME		= CCR_FS,
161d88b1397SPeter Ujfalusi 	CCR_SYNC_BLOCK		= CCR_BS,
162d88b1397SPeter Ujfalusi 	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,
163d88b1397SPeter Ujfalusi 
164d88b1397SPeter Ujfalusi 	CSDP_DATA_TYPE_8	= 0,
165d88b1397SPeter Ujfalusi 	CSDP_DATA_TYPE_16	= 1,
166d88b1397SPeter Ujfalusi 	CSDP_DATA_TYPE_32	= 2,
167d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
168d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
169d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
170d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
171d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
172d88b1397SPeter Ujfalusi 	CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
173d88b1397SPeter Ujfalusi 	CSDP_SRC_PACKED		= BIT(6),
174d88b1397SPeter Ujfalusi 	CSDP_SRC_BURST_1	= 0 << 7,
175d88b1397SPeter Ujfalusi 	CSDP_SRC_BURST_16	= 1 << 7,
176d88b1397SPeter Ujfalusi 	CSDP_SRC_BURST_32	= 2 << 7,
177d88b1397SPeter Ujfalusi 	CSDP_SRC_BURST_64	= 3 << 7,
178d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
179d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
180d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
181d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
182d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
183d88b1397SPeter Ujfalusi 	CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
184d88b1397SPeter Ujfalusi 	CSDP_DST_PACKED		= BIT(13),
185d88b1397SPeter Ujfalusi 	CSDP_DST_BURST_1	= 0 << 14,
186d88b1397SPeter Ujfalusi 	CSDP_DST_BURST_16	= 1 << 14,
187d88b1397SPeter Ujfalusi 	CSDP_DST_BURST_32	= 2 << 14,
188d88b1397SPeter Ujfalusi 	CSDP_DST_BURST_64	= 3 << 14,
189d88b1397SPeter Ujfalusi 	CSDP_WRITE_NON_POSTED	= 0 << 16,
190d88b1397SPeter Ujfalusi 	CSDP_WRITE_POSTED	= 1 << 16,
191d88b1397SPeter Ujfalusi 	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
192d88b1397SPeter Ujfalusi 
193d88b1397SPeter Ujfalusi 	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
194d88b1397SPeter Ujfalusi 	CICR_DROP_IE		= BIT(1),
195d88b1397SPeter Ujfalusi 	CICR_HALF_IE		= BIT(2),
196d88b1397SPeter Ujfalusi 	CICR_FRAME_IE		= BIT(3),
197d88b1397SPeter Ujfalusi 	CICR_LAST_IE		= BIT(4),
198d88b1397SPeter Ujfalusi 	CICR_BLOCK_IE		= BIT(5),
199d88b1397SPeter Ujfalusi 	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
200d88b1397SPeter Ujfalusi 	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
201d88b1397SPeter Ujfalusi 	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
202d88b1397SPeter Ujfalusi 	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
203d88b1397SPeter Ujfalusi 	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
204d88b1397SPeter Ujfalusi 	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */
205d88b1397SPeter Ujfalusi 
206d88b1397SPeter Ujfalusi 	CLNK_CTRL_ENABLE_LNK	= BIT(15),
207d88b1397SPeter Ujfalusi 
208d88b1397SPeter Ujfalusi 	CDP_DST_VALID_INC	= 0 << 0,
209d88b1397SPeter Ujfalusi 	CDP_DST_VALID_RELOAD	= 1 << 0,
210d88b1397SPeter Ujfalusi 	CDP_DST_VALID_REUSE	= 2 << 0,
211d88b1397SPeter Ujfalusi 	CDP_SRC_VALID_INC	= 0 << 2,
212d88b1397SPeter Ujfalusi 	CDP_SRC_VALID_RELOAD	= 1 << 2,
213d88b1397SPeter Ujfalusi 	CDP_SRC_VALID_REUSE	= 2 << 2,
214d88b1397SPeter Ujfalusi 	CDP_NTYPE_TYPE1		= 1 << 4,
215d88b1397SPeter Ujfalusi 	CDP_NTYPE_TYPE2		= 2 << 4,
216d88b1397SPeter Ujfalusi 	CDP_NTYPE_TYPE3		= 3 << 4,
217d88b1397SPeter Ujfalusi 	CDP_TMODE_NORMAL	= 0 << 8,
218d88b1397SPeter Ujfalusi 	CDP_TMODE_LLIST		= 1 << 8,
219d88b1397SPeter Ujfalusi 	CDP_FAST		= BIT(10),
220d88b1397SPeter Ujfalusi };
221d88b1397SPeter Ujfalusi 
222d88b1397SPeter Ujfalusi static const unsigned es_bytes[] = {
223d88b1397SPeter Ujfalusi 	[CSDP_DATA_TYPE_8] = 1,
224d88b1397SPeter Ujfalusi 	[CSDP_DATA_TYPE_16] = 2,
225d88b1397SPeter Ujfalusi 	[CSDP_DATA_TYPE_32] = 4,
226d88b1397SPeter Ujfalusi };
227d88b1397SPeter Ujfalusi 
2289c71b9ebSArnd Bergmann static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
229d88b1397SPeter Ujfalusi static struct of_dma_filter_info omap_dma_info = {
230d88b1397SPeter Ujfalusi 	.filter_fn = omap_dma_filter_fn,
231d88b1397SPeter Ujfalusi };
232d88b1397SPeter Ujfalusi 
to_omap_dma_dev(struct dma_device * d)233d88b1397SPeter Ujfalusi static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
234d88b1397SPeter Ujfalusi {
235d88b1397SPeter Ujfalusi 	return container_of(d, struct omap_dmadev, ddev);
236d88b1397SPeter Ujfalusi }
237d88b1397SPeter Ujfalusi 
to_omap_dma_chan(struct dma_chan * c)238d88b1397SPeter Ujfalusi static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
239d88b1397SPeter Ujfalusi {
240d88b1397SPeter Ujfalusi 	return container_of(c, struct omap_chan, vc.chan);
241d88b1397SPeter Ujfalusi }
242d88b1397SPeter Ujfalusi 
to_omap_dma_desc(struct dma_async_tx_descriptor * t)243d88b1397SPeter Ujfalusi static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
244d88b1397SPeter Ujfalusi {
245d88b1397SPeter Ujfalusi 	return container_of(t, struct omap_desc, vd.tx);
246d88b1397SPeter Ujfalusi }
247d88b1397SPeter Ujfalusi 
omap_dma_desc_free(struct virt_dma_desc * vd)248d88b1397SPeter Ujfalusi static void omap_dma_desc_free(struct virt_dma_desc *vd)
249d88b1397SPeter Ujfalusi {
250d88b1397SPeter Ujfalusi 	struct omap_desc *d = to_omap_dma_desc(&vd->tx);
251d88b1397SPeter Ujfalusi 
252d88b1397SPeter Ujfalusi 	if (d->using_ll) {
253d88b1397SPeter Ujfalusi 		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
254d88b1397SPeter Ujfalusi 		int i;
255d88b1397SPeter Ujfalusi 
256d88b1397SPeter Ujfalusi 		for (i = 0; i < d->sglen; i++) {
257d88b1397SPeter Ujfalusi 			if (d->sg[i].t2_desc)
258d88b1397SPeter Ujfalusi 				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
259d88b1397SPeter Ujfalusi 					      d->sg[i].t2_desc_paddr);
260d88b1397SPeter Ujfalusi 		}
261d88b1397SPeter Ujfalusi 	}
262d88b1397SPeter Ujfalusi 
263d88b1397SPeter Ujfalusi 	kfree(d);
264d88b1397SPeter Ujfalusi }
265d88b1397SPeter Ujfalusi 
/*
 * Populate the hardware type2 linked-list descriptor for sg entry @idx of
 * @d, chaining it to the previous entry and terminating the list when
 * @last is set (next_desc = 0xfffffffc is the hardware end marker).
 */
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	/* Only the final descriptor raises the block-complete interrupt. */
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}
309d88b1397SPeter Ujfalusi 
/*
 * Write @val to a register at @addr whose access width is described by
 * @type (16-bit, two adjacent 16-bit halves, or 32-bit).
 */
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		/* 32-bit value split across two consecutive 16-bit registers. */
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Read a register at @addr with the access width described by @type.
 * Returns 0 (with a warning) for an unknown type.
 */
static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}
350d88b1397SPeter Ujfalusi 
omap_dma_glbl_write(struct omap_dmadev * od,unsigned reg,unsigned val)351d88b1397SPeter Ujfalusi static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
352d88b1397SPeter Ujfalusi {
353d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *r = od->reg_map + reg;
354d88b1397SPeter Ujfalusi 
355d88b1397SPeter Ujfalusi 	WARN_ON(r->stride);
356d88b1397SPeter Ujfalusi 
357d88b1397SPeter Ujfalusi 	omap_dma_write(val, r->type, od->base + r->offset);
358d88b1397SPeter Ujfalusi }
359d88b1397SPeter Ujfalusi 
omap_dma_glbl_read(struct omap_dmadev * od,unsigned reg)360d88b1397SPeter Ujfalusi static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
361d88b1397SPeter Ujfalusi {
362d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *r = od->reg_map + reg;
363d88b1397SPeter Ujfalusi 
364d88b1397SPeter Ujfalusi 	WARN_ON(r->stride);
365d88b1397SPeter Ujfalusi 
366d88b1397SPeter Ujfalusi 	return omap_dma_read(r->type, od->base + r->offset);
367d88b1397SPeter Ujfalusi }
368d88b1397SPeter Ujfalusi 
omap_dma_chan_write(struct omap_chan * c,unsigned reg,unsigned val)369d88b1397SPeter Ujfalusi static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
370d88b1397SPeter Ujfalusi {
371d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *r = c->reg_map + reg;
372d88b1397SPeter Ujfalusi 
373d88b1397SPeter Ujfalusi 	omap_dma_write(val, r->type, c->channel_base + r->offset);
374d88b1397SPeter Ujfalusi }
375d88b1397SPeter Ujfalusi 
omap_dma_chan_read(struct omap_chan * c,unsigned reg)376d88b1397SPeter Ujfalusi static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
377d88b1397SPeter Ujfalusi {
378d88b1397SPeter Ujfalusi 	const struct omap_dma_reg *r = c->reg_map + reg;
379d88b1397SPeter Ujfalusi 
380d88b1397SPeter Ujfalusi 	return omap_dma_read(r->type, c->channel_base + r->offset);
381d88b1397SPeter Ujfalusi }
382d88b1397SPeter Ujfalusi 
omap_dma_clear_csr(struct omap_chan * c)383d88b1397SPeter Ujfalusi static void omap_dma_clear_csr(struct omap_chan *c)
384d88b1397SPeter Ujfalusi {
385d88b1397SPeter Ujfalusi 	if (dma_omap1())
386d88b1397SPeter Ujfalusi 		omap_dma_chan_read(c, CSR);
387d88b1397SPeter Ujfalusi 	else
388d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CSR, ~0);
389d88b1397SPeter Ujfalusi }
390d88b1397SPeter Ujfalusi 
omap_dma_get_csr(struct omap_chan * c)391d88b1397SPeter Ujfalusi static unsigned omap_dma_get_csr(struct omap_chan *c)
392d88b1397SPeter Ujfalusi {
393d88b1397SPeter Ujfalusi 	unsigned val = omap_dma_chan_read(c, CSR);
394d88b1397SPeter Ujfalusi 
395d88b1397SPeter Ujfalusi 	if (!dma_omap1())
396d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CSR, val);
397d88b1397SPeter Ujfalusi 
398d88b1397SPeter Ujfalusi 	return val;
399d88b1397SPeter Ujfalusi }
400d88b1397SPeter Ujfalusi 
omap_dma_clear_lch(struct omap_dmadev * od,int lch)4014c74ecf7STony Lindgren static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
4024c74ecf7STony Lindgren {
4034c74ecf7STony Lindgren 	struct omap_chan *c;
4044c74ecf7STony Lindgren 	int i;
4054c74ecf7STony Lindgren 
4064c74ecf7STony Lindgren 	c = od->lch_map[lch];
4074c74ecf7STony Lindgren 	if (!c)
4084c74ecf7STony Lindgren 		return;
4094c74ecf7STony Lindgren 
4104c74ecf7STony Lindgren 	for (i = CSDP; i <= od->cfg->lch_end; i++)
4114c74ecf7STony Lindgren 		omap_dma_chan_write(c, i, 0);
4124c74ecf7STony Lindgren }
4134c74ecf7STony Lindgren 
omap_dma_assign(struct omap_dmadev * od,struct omap_chan * c,unsigned lch)414d88b1397SPeter Ujfalusi static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
415d88b1397SPeter Ujfalusi 	unsigned lch)
416d88b1397SPeter Ujfalusi {
417d88b1397SPeter Ujfalusi 	c->channel_base = od->base + od->plat->channel_stride * lch;
418d88b1397SPeter Ujfalusi 
419d88b1397SPeter Ujfalusi 	od->lch_map[lch] = c;
420d88b1397SPeter Ujfalusi }
421d88b1397SPeter Ujfalusi 
omap_dma_start(struct omap_chan * c,struct omap_desc * d)422d88b1397SPeter Ujfalusi static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
423d88b1397SPeter Ujfalusi {
424d88b1397SPeter Ujfalusi 	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
425d88b1397SPeter Ujfalusi 	uint16_t cicr = d->cicr;
426d88b1397SPeter Ujfalusi 
427d88b1397SPeter Ujfalusi 	if (__dma_omap15xx(od->plat->dma_attr))
428d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CPC, 0);
429d88b1397SPeter Ujfalusi 	else
430d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CDAC, 0);
431d88b1397SPeter Ujfalusi 
432d88b1397SPeter Ujfalusi 	omap_dma_clear_csr(c);
433d88b1397SPeter Ujfalusi 
434d88b1397SPeter Ujfalusi 	if (d->using_ll) {
435d88b1397SPeter Ujfalusi 		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
436d88b1397SPeter Ujfalusi 
437d88b1397SPeter Ujfalusi 		if (d->dir == DMA_DEV_TO_MEM)
438d88b1397SPeter Ujfalusi 			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
439d88b1397SPeter Ujfalusi 		else
440d88b1397SPeter Ujfalusi 			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
441d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CDP, cdp);
442d88b1397SPeter Ujfalusi 
443d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
444d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCDN, 0);
445d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCFN, 0xffff);
446d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCEN, 0xffffff);
447d88b1397SPeter Ujfalusi 
448d88b1397SPeter Ujfalusi 		cicr &= ~CICR_BLOCK_IE;
449d88b1397SPeter Ujfalusi 	} else if (od->ll123_supported) {
450d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CDP, 0);
451d88b1397SPeter Ujfalusi 	}
452d88b1397SPeter Ujfalusi 
453d88b1397SPeter Ujfalusi 	/* Enable interrupts */
454d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CICR, cicr);
455d88b1397SPeter Ujfalusi 
456d88b1397SPeter Ujfalusi 	/* Enable channel */
457d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
458d88b1397SPeter Ujfalusi 
459d88b1397SPeter Ujfalusi 	c->running = true;
460d88b1397SPeter Ujfalusi }
461d88b1397SPeter Ujfalusi 
omap_dma_drain_chan(struct omap_chan * c)462d88b1397SPeter Ujfalusi static void omap_dma_drain_chan(struct omap_chan *c)
463d88b1397SPeter Ujfalusi {
464d88b1397SPeter Ujfalusi 	int i;
465d88b1397SPeter Ujfalusi 	u32 val;
466d88b1397SPeter Ujfalusi 
467d88b1397SPeter Ujfalusi 	/* Wait for sDMA FIFO to drain */
468d88b1397SPeter Ujfalusi 	for (i = 0; ; i++) {
469d88b1397SPeter Ujfalusi 		val = omap_dma_chan_read(c, CCR);
470d88b1397SPeter Ujfalusi 		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
471d88b1397SPeter Ujfalusi 			break;
472d88b1397SPeter Ujfalusi 
473d88b1397SPeter Ujfalusi 		if (i > 100)
474d88b1397SPeter Ujfalusi 			break;
475d88b1397SPeter Ujfalusi 
476d88b1397SPeter Ujfalusi 		udelay(5);
477d88b1397SPeter Ujfalusi 	}
478d88b1397SPeter Ujfalusi 
479d88b1397SPeter Ujfalusi 	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
480d88b1397SPeter Ujfalusi 		dev_err(c->vc.chan.device->dev,
481d88b1397SPeter Ujfalusi 			"DMA drain did not complete on lch %d\n",
482d88b1397SPeter Ujfalusi 			c->dma_ch);
483d88b1397SPeter Ujfalusi }
484d88b1397SPeter Ujfalusi 
omap_dma_stop(struct omap_chan * c)485d88b1397SPeter Ujfalusi static int omap_dma_stop(struct omap_chan *c)
486d88b1397SPeter Ujfalusi {
487d88b1397SPeter Ujfalusi 	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
488d88b1397SPeter Ujfalusi 	uint32_t val;
489d88b1397SPeter Ujfalusi 
490d88b1397SPeter Ujfalusi 	/* disable irq */
491d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CICR, 0);
492d88b1397SPeter Ujfalusi 
493d88b1397SPeter Ujfalusi 	omap_dma_clear_csr(c);
494d88b1397SPeter Ujfalusi 
495d88b1397SPeter Ujfalusi 	val = omap_dma_chan_read(c, CCR);
496d88b1397SPeter Ujfalusi 	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
497d88b1397SPeter Ujfalusi 		uint32_t sysconfig;
498d88b1397SPeter Ujfalusi 
499d88b1397SPeter Ujfalusi 		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
500d88b1397SPeter Ujfalusi 		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
501d88b1397SPeter Ujfalusi 		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
502d88b1397SPeter Ujfalusi 		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
503d88b1397SPeter Ujfalusi 
504d88b1397SPeter Ujfalusi 		val = omap_dma_chan_read(c, CCR);
505d88b1397SPeter Ujfalusi 		val &= ~CCR_ENABLE;
506d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCR, val);
507d88b1397SPeter Ujfalusi 
508d88b1397SPeter Ujfalusi 		if (!(c->ccr & CCR_BUFFERING_DISABLE))
509d88b1397SPeter Ujfalusi 			omap_dma_drain_chan(c);
510d88b1397SPeter Ujfalusi 
511d88b1397SPeter Ujfalusi 		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
512d88b1397SPeter Ujfalusi 	} else {
513d88b1397SPeter Ujfalusi 		if (!(val & CCR_ENABLE))
514d88b1397SPeter Ujfalusi 			return -EINVAL;
515d88b1397SPeter Ujfalusi 
516d88b1397SPeter Ujfalusi 		val &= ~CCR_ENABLE;
517d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCR, val);
518d88b1397SPeter Ujfalusi 
519d88b1397SPeter Ujfalusi 		if (!(c->ccr & CCR_BUFFERING_DISABLE))
520d88b1397SPeter Ujfalusi 			omap_dma_drain_chan(c);
521d88b1397SPeter Ujfalusi 	}
522d88b1397SPeter Ujfalusi 
523d88b1397SPeter Ujfalusi 	mb();
524d88b1397SPeter Ujfalusi 
525d88b1397SPeter Ujfalusi 	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
526d88b1397SPeter Ujfalusi 		val = omap_dma_chan_read(c, CLNK_CTRL);
527d88b1397SPeter Ujfalusi 
528d88b1397SPeter Ujfalusi 		if (dma_omap1())
529d88b1397SPeter Ujfalusi 			val |= 1 << 14; /* set the STOP_LNK bit */
530d88b1397SPeter Ujfalusi 		else
531d88b1397SPeter Ujfalusi 			val &= ~CLNK_CTRL_ENABLE_LNK;
532d88b1397SPeter Ujfalusi 
533d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CLNK_CTRL, val);
534d88b1397SPeter Ujfalusi 	}
535d88b1397SPeter Ujfalusi 	c->running = false;
536d88b1397SPeter Ujfalusi 	return 0;
537d88b1397SPeter Ujfalusi }
538d88b1397SPeter Ujfalusi 
omap_dma_start_sg(struct omap_chan * c,struct omap_desc * d)539d88b1397SPeter Ujfalusi static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
540d88b1397SPeter Ujfalusi {
541d88b1397SPeter Ujfalusi 	struct omap_sg *sg = d->sg + c->sgidx;
542d88b1397SPeter Ujfalusi 	unsigned cxsa, cxei, cxfi;
543d88b1397SPeter Ujfalusi 
544d88b1397SPeter Ujfalusi 	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
545d88b1397SPeter Ujfalusi 		cxsa = CDSA;
546d88b1397SPeter Ujfalusi 		cxei = CDEI;
547d88b1397SPeter Ujfalusi 		cxfi = CDFI;
548d88b1397SPeter Ujfalusi 	} else {
549d88b1397SPeter Ujfalusi 		cxsa = CSSA;
550d88b1397SPeter Ujfalusi 		cxei = CSEI;
551d88b1397SPeter Ujfalusi 		cxfi = CSFI;
552d88b1397SPeter Ujfalusi 	}
553d88b1397SPeter Ujfalusi 
554d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxsa, sg->addr);
555d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxei, sg->ei);
556d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxfi, sg->fi);
557d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CEN, sg->en);
558d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CFN, sg->fn);
559d88b1397SPeter Ujfalusi 
560d88b1397SPeter Ujfalusi 	omap_dma_start(c, d);
561d88b1397SPeter Ujfalusi 	c->sgidx++;
562d88b1397SPeter Ujfalusi }
563d88b1397SPeter Ujfalusi 
omap_dma_start_desc(struct omap_chan * c)564d88b1397SPeter Ujfalusi static void omap_dma_start_desc(struct omap_chan *c)
565d88b1397SPeter Ujfalusi {
566d88b1397SPeter Ujfalusi 	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
567d88b1397SPeter Ujfalusi 	struct omap_desc *d;
568d88b1397SPeter Ujfalusi 	unsigned cxsa, cxei, cxfi;
569d88b1397SPeter Ujfalusi 
570d88b1397SPeter Ujfalusi 	if (!vd) {
571d88b1397SPeter Ujfalusi 		c->desc = NULL;
572d88b1397SPeter Ujfalusi 		return;
573d88b1397SPeter Ujfalusi 	}
574d88b1397SPeter Ujfalusi 
575d88b1397SPeter Ujfalusi 	list_del(&vd->node);
576d88b1397SPeter Ujfalusi 
577d88b1397SPeter Ujfalusi 	c->desc = d = to_omap_dma_desc(&vd->tx);
578d88b1397SPeter Ujfalusi 	c->sgidx = 0;
579d88b1397SPeter Ujfalusi 
580d88b1397SPeter Ujfalusi 	/*
581d88b1397SPeter Ujfalusi 	 * This provides the necessary barrier to ensure data held in
582d88b1397SPeter Ujfalusi 	 * DMA coherent memory is visible to the DMA engine prior to
583d88b1397SPeter Ujfalusi 	 * the transfer starting.
584d88b1397SPeter Ujfalusi 	 */
585d88b1397SPeter Ujfalusi 	mb();
586d88b1397SPeter Ujfalusi 
587d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CCR, d->ccr);
588d88b1397SPeter Ujfalusi 	if (dma_omap1())
589d88b1397SPeter Ujfalusi 		omap_dma_chan_write(c, CCR2, d->ccr >> 16);
590d88b1397SPeter Ujfalusi 
591d88b1397SPeter Ujfalusi 	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
592d88b1397SPeter Ujfalusi 		cxsa = CSSA;
593d88b1397SPeter Ujfalusi 		cxei = CSEI;
594d88b1397SPeter Ujfalusi 		cxfi = CSFI;
595d88b1397SPeter Ujfalusi 	} else {
596d88b1397SPeter Ujfalusi 		cxsa = CDSA;
597d88b1397SPeter Ujfalusi 		cxei = CDEI;
598d88b1397SPeter Ujfalusi 		cxfi = CDFI;
599d88b1397SPeter Ujfalusi 	}
600d88b1397SPeter Ujfalusi 
601d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxsa, d->dev_addr);
602d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxei, d->ei);
603d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, cxfi, d->fi);
604d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CSDP, d->csdp);
605d88b1397SPeter Ujfalusi 	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
606d88b1397SPeter Ujfalusi 
607d88b1397SPeter Ujfalusi 	omap_dma_start_sg(c, d);
608d88b1397SPeter Ujfalusi }
609d88b1397SPeter Ujfalusi 
/*
 * Per-channel completion callback (legacy DMA API path). Under the
 * virt-dma lock: cyclic transfers get a period callback; otherwise the
 * descriptor is completed when its last sg entry (or linked list) is
 * done, else the next sg entry is started.
 */
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
630d88b1397SPeter Ujfalusi 
/*
 * Interrupt handler for the L1 interrupt line (used in non-legacy
 * mode).  Walks the pending status bits, acknowledges each channel's
 * interrupt and forwards the per-channel CSR value to
 * omap_dma_callback().
 */
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	/* Only look at channels this driver enabled on L1. */
	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;	/* ffs() returns a 1-based bit index */
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		/* Fetch the channel status, then ack the L1 status bit. */
		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}
670d88b1397SPeter Ujfalusi 
omap_dma_get_lch(struct omap_dmadev * od,int * lch)67161ecb539STony Lindgren static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
67261ecb539STony Lindgren {
67361ecb539STony Lindgren 	int channel;
67461ecb539STony Lindgren 
67561ecb539STony Lindgren 	mutex_lock(&od->lch_lock);
67661ecb539STony Lindgren 	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
67761ecb539STony Lindgren 	if (channel >= od->lch_count)
67861ecb539STony Lindgren 		goto out_busy;
67961ecb539STony Lindgren 	set_bit(channel, od->lch_bitmap);
68061ecb539STony Lindgren 	mutex_unlock(&od->lch_lock);
68161ecb539STony Lindgren 
68261ecb539STony Lindgren 	omap_dma_clear_lch(od, channel);
68361ecb539STony Lindgren 	*lch = channel;
68461ecb539STony Lindgren 
68561ecb539STony Lindgren 	return 0;
68661ecb539STony Lindgren 
68761ecb539STony Lindgren out_busy:
68861ecb539STony Lindgren 	mutex_unlock(&od->lch_lock);
68961ecb539STony Lindgren 	*lch = -EINVAL;
69061ecb539STony Lindgren 
69161ecb539STony Lindgren 	return -EBUSY;
69261ecb539STony Lindgren }
69361ecb539STony Lindgren 
/*
 * Return a logical channel to the pool.  The channel is cleared first,
 * then its bit is released under the allocation mutex.
 */
static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
{
	omap_dma_clear_lch(od, lch);
	mutex_lock(&od->lch_lock);
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
}
70161ecb539STony Lindgren 
/*
 * True when the legacy OMAP1 DMA core (omap_request_dma()/
 * omap_free_dma()) owns channel allocation; compiles to false unless
 * CONFIG_ARCH_OMAP1 is enabled.
 */
static inline bool omap_dma_legacy(struct omap_dmadev *od)
{
	return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy;
}
70652ef8efcSArnd Bergmann 
/*
 * dmaengine device_alloc_chan_resources callback.  Obtains a logical
 * DMA channel (from the legacy OMAP1 core or our own bitmap), routes
 * its interrupt to line L1 in non-legacy mode, and derives the base
 * CCR value from the request line (dma_sig).  Returns the allocation
 * result (>= 0 on success, negative errno otherwise).
 */
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (omap_dma_legacy(od)) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_dma_get_lch(od, &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!omap_dma_legacy(od)) {
			unsigned val;

			/*
			 * Clear any stale L1 status for this channel,
			 * enable it on L1 (our IRQ line) and mask it on
			 * L0.
			 */
			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		/* OMAP2+: the request line is split across CCR fields. */
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}
759d88b1397SPeter Ujfalusi 
/*
 * dmaengine device_free_chan_resources callback.  Undoes
 * omap_dma_alloc_chan_resources(): disables the channel's L1 interrupt
 * in non-legacy mode, unmaps the channel and returns it either to the
 * legacy OMAP1 core or to our bitmap.
 */
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!omap_dma_legacy(od)) {
		/* Stop listening for this channel on L1. */
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);

	if (omap_dma_legacy(od))
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}
785d88b1397SPeter Ujfalusi 
omap_dma_sg_size(struct omap_sg * sg)786d88b1397SPeter Ujfalusi static size_t omap_dma_sg_size(struct omap_sg *sg)
787d88b1397SPeter Ujfalusi {
788d88b1397SPeter Ujfalusi 	return sg->en * sg->fn;
789d88b1397SPeter Ujfalusi }
790d88b1397SPeter Ujfalusi 
omap_dma_desc_size(struct omap_desc * d)791d88b1397SPeter Ujfalusi static size_t omap_dma_desc_size(struct omap_desc *d)
792d88b1397SPeter Ujfalusi {
793d88b1397SPeter Ujfalusi 	unsigned i;
794d88b1397SPeter Ujfalusi 	size_t size;
795d88b1397SPeter Ujfalusi 
796d88b1397SPeter Ujfalusi 	for (size = i = 0; i < d->sglen; i++)
797d88b1397SPeter Ujfalusi 		size += omap_dma_sg_size(&d->sg[i]);
798d88b1397SPeter Ujfalusi 
799d88b1397SPeter Ujfalusi 	return size * es_bytes[d->es];
800d88b1397SPeter Ujfalusi }
801d88b1397SPeter Ujfalusi 
omap_dma_desc_size_pos(struct omap_desc * d,dma_addr_t addr)802d88b1397SPeter Ujfalusi static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
803d88b1397SPeter Ujfalusi {
804d88b1397SPeter Ujfalusi 	unsigned i;
805d88b1397SPeter Ujfalusi 	size_t size, es_size = es_bytes[d->es];
806d88b1397SPeter Ujfalusi 
807d88b1397SPeter Ujfalusi 	for (size = i = 0; i < d->sglen; i++) {
808d88b1397SPeter Ujfalusi 		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
809d88b1397SPeter Ujfalusi 
810d88b1397SPeter Ujfalusi 		if (size)
811d88b1397SPeter Ujfalusi 			size += this_size;
812d88b1397SPeter Ujfalusi 		else if (addr >= d->sg[i].addr &&
813d88b1397SPeter Ujfalusi 			 addr < d->sg[i].addr + this_size)
814d88b1397SPeter Ujfalusi 			size += d->sg[i].addr + this_size - addr;
815d88b1397SPeter Ujfalusi 	}
816d88b1397SPeter Ujfalusi 	return size;
817d88b1397SPeter Ujfalusi }
818d88b1397SPeter Ujfalusi 
819d88b1397SPeter Ujfalusi /*
820d88b1397SPeter Ujfalusi  * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
821d88b1397SPeter Ujfalusi  * read before the DMA controller finished disabling the channel.
822d88b1397SPeter Ujfalusi  */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	/* Erratum 3.3 workaround: re-read once if a spurious 0 came back. */
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}
834d88b1397SPeter Ujfalusi 
/*
 * Current source address of the running transfer.  OMAP15xx only
 * provides CPC; other parts read CSAC (with the erratum 3.3 re-read),
 * falling back to the programmed source start address (CSSA) while
 * CDAC is still 0, i.e. before the transfer has moved any data.  On
 * OMAP1 the upper 16 address bits are taken from CSSA.
 */
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}
860d88b1397SPeter Ujfalusi 
/*
 * Current destination address of the running transfer.  OMAP15xx only
 * provides CPC; other parts read CDAC (with the erratum 3.3 re-read),
 * falling back to the programmed destination start address (CDSA)
 * while CDAC is still 0.  On OMAP1 the upper 16 address bits are taken
 * from CDSA.
 */
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far).  Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}
886d88b1397SPeter Ujfalusi 
/*
 * dmaengine device_tx_status callback.  Reports the completion state of
 * @cookie and, when @txstate is supplied, the residue (bytes left to
 * transfer).  Also detects completion of polled (interrupt-less)
 * transfers by sampling the channel's CCR_ENABLE bit.
 */
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;
	struct omap_desc *d = NULL;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* Does the cookie belong to the currently running descriptor? */
	if (c->desc && c->desc->vd.tx.cookie == cookie)
		d = c->desc;

	if (!txstate)
		goto out;

	if (d) {
		/* Running: measure residue from the hardware position. */
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM  || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		/* Not running: if still queued, the full size remains. */
		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

		if (vd)
			txstate->residue = omap_dma_desc_size(
						to_omap_dma_desc(&vd->tx));
		else
			txstate->residue = 0;
	}

out:
	if (ret == DMA_IN_PROGRESS && c->paused) {
		ret = DMA_PAUSED;
	} else if (d && d->polled && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly and mark it as completed
		 */
		if (!(ccr & CCR_ENABLE)) {
			ret = DMA_COMPLETE;
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
947d88b1397SPeter Ujfalusi 
/*
 * dmaengine device_issue_pending callback: move submitted descriptors
 * to the issued list and kick the hardware if the channel is idle.
 */
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
958d88b1397SPeter Ujfalusi 
/*
 * Prepare a slave scatter-gather transfer.  Each sg entry becomes one
 * {address, EN, FN} triple.  When a port window is configured, the
 * device side is addressed in double-index mode so each frame rewinds
 * to the window start; when the controller supports type2 linked
 * lists (ll123), multi-entry lists are chained in hardware via
 * dma_pool-allocated type2 descriptors.  Returns NULL on bad
 * parameters or allocation failure.
 */
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by  configure
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		/* Memory side increments; device side is constant or
		 * double-indexed over the port window. */
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;

		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	/* Hardware linked lists only pay off for multi-entry transfers. */
	if (sglen >= 2)
		d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				/* Fall back to software-driven sg mode. */
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
1140d88b1397SPeter Ujfalusi 
/*
 * Prepare a cyclic (circular-buffer) transfer, e.g. for audio.  The
 * buffer is described by a single sg entry with period_len/ES elements
 * per frame and buf_len/period_len frames; the channel either
 * auto-reinitialises (OMAP15xx) or links back onto itself so the
 * transfer loops until terminated.  An interrupt fires per frame when
 * DMA_PREP_INTERRUPT is requested.  Returns NULL on bad direction or
 * bus width, or on allocation failure.
 */
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/*
	 * Now allocate and setup the descriptor.  Use struct_size() for
	 * the single-entry sg[] tail, matching omap_dma_prep_slave_sg().
	 */
	d = kzalloc(struct_size(d, sg, 1), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;	/* notify once per period */

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	/*
	 * Loop forever: OMAP15xx auto-reinitialises, everything else
	 * links the channel back onto itself.
	 */
	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
1241d88b1397SPeter Ujfalusi 
/*
 * Build a descriptor for a plain memory-to-memory copy: a single
 * scatterlist entry, one frame, post-incrementing on both ends.
 * Returns NULL on allocation failure.
 */
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	/*
	 * Use the widest element size (capped at 32 bits) that divides
	 * both addresses and the length evenly.
	 */
	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sglen = 1;
	d->sg[0].addr = dest;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->ccr = c->ccr | CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	/* No completion interrupt requested: completion will be polled. */
	if (tx_flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;
	else
		d->polled = true;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
1287d88b1397SPeter Ujfalusi 
/*
 * Prepare a memory-to-memory interleaved transfer. Only templates with
 * exactly one chunk per frame (frame_size == 1) and a non-zero frame
 * count are supported; slave directions are rejected outright.
 * Inter-chunk gaps map onto the sDMA double-indexed addressing mode.
 * Returns NULL on invalid template or allocation failure.
 */
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	/* Widest element size (max 32 bit) the addresses and size allow. */
	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		/* Gap between source frames: double-indexed addressing. */
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		/* Gap between destination frames: double-indexed addressing. */
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
1370d88b1397SPeter Ujfalusi 
omap_dma_slave_config(struct dma_chan * chan,struct dma_slave_config * cfg)1371d88b1397SPeter Ujfalusi static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
1372d88b1397SPeter Ujfalusi {
1373d88b1397SPeter Ujfalusi 	struct omap_chan *c = to_omap_dma_chan(chan);
1374d88b1397SPeter Ujfalusi 
1375d88b1397SPeter Ujfalusi 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1376d88b1397SPeter Ujfalusi 	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1377d88b1397SPeter Ujfalusi 		return -EINVAL;
1378d88b1397SPeter Ujfalusi 
1379d88b1397SPeter Ujfalusi 	if (cfg->src_maxburst > chan->device->max_burst ||
1380d88b1397SPeter Ujfalusi 	    cfg->dst_maxburst > chan->device->max_burst)
1381d88b1397SPeter Ujfalusi 		return -EINVAL;
1382d88b1397SPeter Ujfalusi 
1383d88b1397SPeter Ujfalusi 	memcpy(&c->cfg, cfg, sizeof(c->cfg));
1384d88b1397SPeter Ujfalusi 
1385d88b1397SPeter Ujfalusi 	return 0;
1386d88b1397SPeter Ujfalusi }
1387d88b1397SPeter Ujfalusi 
/*
 * Abort all activity on the channel: stop the hardware if it is running,
 * then free every queued and in-flight descriptor. Always returns 0.
 */
static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	/* Collect pending descriptors under the lock, free them outside. */
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
1418d88b1397SPeter Ujfalusi 
omap_dma_synchronize(struct dma_chan * chan)1419d88b1397SPeter Ujfalusi static void omap_dma_synchronize(struct dma_chan *chan)
1420d88b1397SPeter Ujfalusi {
1421d88b1397SPeter Ujfalusi 	struct omap_chan *c = to_omap_dma_chan(chan);
1422d88b1397SPeter Ujfalusi 
1423d88b1397SPeter Ujfalusi 	vchan_synchronize(&c->vc);
1424d88b1397SPeter Ujfalusi }
1425d88b1397SPeter Ujfalusi 
/*
 * Pause the channel if it is safe to do so: only cyclic transfers and
 * DMA_DEV_TO_MEM transfers may be paused (see the rationale below).
 * Returns 0 on success, -EINVAL if the channel cannot be paused.
 */
static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	/* irq_lock also serialises against the device IRQ handler. */
	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to an UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
1478d88b1397SPeter Ujfalusi 
/*
 * Restart a channel previously stopped by omap_dma_pause().
 * Returns 0 on success, -EINVAL if the channel is not paused or has
 * no active descriptor.
 */
static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		/*
		 * NOTE(review): presumably orders prior CPU writes before the
		 * channel is re-enabled — the exact pairing is undocumented
		 * here; confirm before relying on it.
		 */
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
1502d88b1397SPeter Ujfalusi 
omap_dma_chan_init(struct omap_dmadev * od)1503d88b1397SPeter Ujfalusi static int omap_dma_chan_init(struct omap_dmadev *od)
1504d88b1397SPeter Ujfalusi {
1505d88b1397SPeter Ujfalusi 	struct omap_chan *c;
1506d88b1397SPeter Ujfalusi 
1507d88b1397SPeter Ujfalusi 	c = kzalloc(sizeof(*c), GFP_KERNEL);
1508d88b1397SPeter Ujfalusi 	if (!c)
1509d88b1397SPeter Ujfalusi 		return -ENOMEM;
1510d88b1397SPeter Ujfalusi 
1511d88b1397SPeter Ujfalusi 	c->reg_map = od->reg_map;
1512d88b1397SPeter Ujfalusi 	c->vc.desc_free = omap_dma_desc_free;
1513d88b1397SPeter Ujfalusi 	vchan_init(&c->vc, &od->ddev);
1514d88b1397SPeter Ujfalusi 
1515d88b1397SPeter Ujfalusi 	return 0;
1516d88b1397SPeter Ujfalusi }
1517d88b1397SPeter Ujfalusi 
omap_dma_free(struct omap_dmadev * od)1518d88b1397SPeter Ujfalusi static void omap_dma_free(struct omap_dmadev *od)
1519d88b1397SPeter Ujfalusi {
1520d88b1397SPeter Ujfalusi 	while (!list_empty(&od->ddev.channels)) {
1521d88b1397SPeter Ujfalusi 		struct omap_chan *c = list_first_entry(&od->ddev.channels,
1522d88b1397SPeter Ujfalusi 			struct omap_chan, vc.chan.device_node);
1523d88b1397SPeter Ujfalusi 
1524d88b1397SPeter Ujfalusi 		list_del(&c->vc.chan.device_node);
1525d88b1397SPeter Ujfalusi 		tasklet_kill(&c->vc.task);
1526d88b1397SPeter Ujfalusi 		kfree(c);
1527d88b1397SPeter Ujfalusi 	}
1528d88b1397SPeter Ujfalusi }
1529d88b1397SPeter Ujfalusi 
153029a25b92STony Lindgren /* Currently used by omap2 & 3 to block deeper SoC idle states */
omap_dma_busy(struct omap_dmadev * od)153129a25b92STony Lindgren static bool omap_dma_busy(struct omap_dmadev *od)
1532f4cfa36dSTony Lindgren {
1533f4cfa36dSTony Lindgren 	struct omap_chan *c;
1534f4cfa36dSTony Lindgren 	int lch = -1;
1535f4cfa36dSTony Lindgren 
1536f4cfa36dSTony Lindgren 	while (1) {
153729a25b92STony Lindgren 		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
1538f4cfa36dSTony Lindgren 		if (lch >= od->lch_count)
1539f4cfa36dSTony Lindgren 			break;
1540f4cfa36dSTony Lindgren 		c = od->lch_map[lch];
1541f4cfa36dSTony Lindgren 		if (!c)
1542f4cfa36dSTony Lindgren 			continue;
1543f4cfa36dSTony Lindgren 		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
154429a25b92STony Lindgren 			return true;
1545f4cfa36dSTony Lindgren 	}
154629a25b92STony Lindgren 
154729a25b92STony Lindgren 	return false;
154829a25b92STony Lindgren }
154929a25b92STony Lindgren 
155029a25b92STony Lindgren /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
omap_dma_busy_notifier(struct notifier_block * nb,unsigned long cmd,void * v)155129a25b92STony Lindgren static int omap_dma_busy_notifier(struct notifier_block *nb,
155229a25b92STony Lindgren 				  unsigned long cmd, void *v)
155329a25b92STony Lindgren {
155429a25b92STony Lindgren 	struct omap_dmadev *od;
155529a25b92STony Lindgren 
155629a25b92STony Lindgren 	od = container_of(nb, struct omap_dmadev, nb);
155729a25b92STony Lindgren 
155829a25b92STony Lindgren 	switch (cmd) {
155929a25b92STony Lindgren 	case CPU_CLUSTER_PM_ENTER:
156029a25b92STony Lindgren 		if (omap_dma_busy(od))
156129a25b92STony Lindgren 			return NOTIFY_BAD;
1562f4cfa36dSTony Lindgren 		break;
1563f4cfa36dSTony Lindgren 	case CPU_CLUSTER_PM_ENTER_FAILED:
1564f4cfa36dSTony Lindgren 	case CPU_CLUSTER_PM_EXIT:
1565f4cfa36dSTony Lindgren 		break;
1566f4cfa36dSTony Lindgren 	}
1567f4cfa36dSTony Lindgren 
1568f4cfa36dSTony Lindgren 	return NOTIFY_OK;
1569f4cfa36dSTony Lindgren }
1570f4cfa36dSTony Lindgren 
15714c74ecf7STony Lindgren /*
15724c74ecf7STony Lindgren  * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
15734c74ecf7STony Lindgren  * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
15744c74ecf7STony Lindgren  * now. Context save seems to be only currently needed on omap3.
15754c74ecf7STony Lindgren  */
omap_dma_context_save(struct omap_dmadev * od)15764c74ecf7STony Lindgren static void omap_dma_context_save(struct omap_dmadev *od)
15774c74ecf7STony Lindgren {
15784c74ecf7STony Lindgren 	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
15794c74ecf7STony Lindgren 	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
15804c74ecf7STony Lindgren 	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
15814c74ecf7STony Lindgren 	od->context.gcr = omap_dma_glbl_read(od, GCR);
15824c74ecf7STony Lindgren }
15834c74ecf7STony Lindgren 
/*
 * Undo omap_dma_context_save() after the DMA domain comes back up:
 * rewrite the saved global registers and reset all logical channels.
 */
static void omap_dma_context_restore(struct omap_dmadev *od)
{
	int i;

	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);

	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
	if (od->plat->errata & DMA_ROMCODE_BUG)
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);

	/* Clear dma channels */
	for (i = 0; i < od->lch_count; i++)
		omap_dma_clear_lch(od, i);
}
16014c74ecf7STony Lindgren 
16024c74ecf7STony Lindgren /* Currently only used for omap3 */
omap_dma_context_notifier(struct notifier_block * nb,unsigned long cmd,void * v)16034c74ecf7STony Lindgren static int omap_dma_context_notifier(struct notifier_block *nb,
16044c74ecf7STony Lindgren 				     unsigned long cmd, void *v)
16054c74ecf7STony Lindgren {
16064c74ecf7STony Lindgren 	struct omap_dmadev *od;
16074c74ecf7STony Lindgren 
16084c74ecf7STony Lindgren 	od = container_of(nb, struct omap_dmadev, nb);
16094c74ecf7STony Lindgren 
16104c74ecf7STony Lindgren 	switch (cmd) {
16114c74ecf7STony Lindgren 	case CPU_CLUSTER_PM_ENTER:
161229a25b92STony Lindgren 		if (omap_dma_busy(od))
161329a25b92STony Lindgren 			return NOTIFY_BAD;
16144c74ecf7STony Lindgren 		omap_dma_context_save(od);
16154c74ecf7STony Lindgren 		break;
1616340ad031STony Lindgren 	case CPU_CLUSTER_PM_ENTER_FAILED:	/* No need to restore context */
1617340ad031STony Lindgren 		break;
16184c74ecf7STony Lindgren 	case CPU_CLUSTER_PM_EXIT:
16194c74ecf7STony Lindgren 		omap_dma_context_restore(od);
16204c74ecf7STony Lindgren 		break;
16214c74ecf7STony Lindgren 	}
16224c74ecf7STony Lindgren 
16234c74ecf7STony Lindgren 	return NOTIFY_OK;
16244c74ecf7STony Lindgren }
16254c74ecf7STony Lindgren 
omap_dma_init_gcr(struct omap_dmadev * od,int arb_rate,int max_fifo_depth,int tparams)16269938ee9cSTony Lindgren static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
16279938ee9cSTony Lindgren 			      int max_fifo_depth, int tparams)
16289938ee9cSTony Lindgren {
16299938ee9cSTony Lindgren 	u32 val;
16309938ee9cSTony Lindgren 
16319938ee9cSTony Lindgren 	/* Set only for omap2430 and later */
16329938ee9cSTony Lindgren 	if (!od->cfg->rw_priority)
16339938ee9cSTony Lindgren 		return;
16349938ee9cSTony Lindgren 
16359938ee9cSTony Lindgren 	if (max_fifo_depth == 0)
16369938ee9cSTony Lindgren 		max_fifo_depth = 1;
16379938ee9cSTony Lindgren 	if (arb_rate == 0)
16389938ee9cSTony Lindgren 		arb_rate = 1;
16399938ee9cSTony Lindgren 
16409938ee9cSTony Lindgren 	val = 0xff & max_fifo_depth;
16419938ee9cSTony Lindgren 	val |= (0x3 & tparams) << 12;
16429938ee9cSTony Lindgren 	val |= (arb_rate & 0xff) << 16;
16439938ee9cSTony Lindgren 
16449938ee9cSTony Lindgren 	omap_dma_glbl_write(od, GCR, val);
16459938ee9cSTony Lindgren }
16469938ee9cSTony Lindgren 
1647d88b1397SPeter Ujfalusi #define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1648d88b1397SPeter Ujfalusi 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1649d88b1397SPeter Ujfalusi 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1650d88b1397SPeter Ujfalusi 
16514c74ecf7STony Lindgren /*
16524c74ecf7STony Lindgren  * No flags currently set for default configuration as omap1 is still
16534c74ecf7STony Lindgren  * using platform data.
16544c74ecf7STony Lindgren  */
16554c74ecf7STony Lindgren static const struct omap_dma_config default_cfg;
16564c74ecf7STony Lindgren 
omap_dma_probe(struct platform_device * pdev)1657d88b1397SPeter Ujfalusi static int omap_dma_probe(struct platform_device *pdev)
1658d88b1397SPeter Ujfalusi {
16594c74ecf7STony Lindgren 	const struct omap_dma_config *conf;
1660d88b1397SPeter Ujfalusi 	struct omap_dmadev *od;
1661d88b1397SPeter Ujfalusi 	int rc, i, irq;
166261ecb539STony Lindgren 	u32 val;
1663d88b1397SPeter Ujfalusi 
1664d88b1397SPeter Ujfalusi 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1665d88b1397SPeter Ujfalusi 	if (!od)
1666d88b1397SPeter Ujfalusi 		return -ENOMEM;
1667d88b1397SPeter Ujfalusi 
16684b23603aSTudor Ambarus 	od->base = devm_platform_ioremap_resource(pdev, 0);
1669d88b1397SPeter Ujfalusi 	if (IS_ERR(od->base))
1670d88b1397SPeter Ujfalusi 		return PTR_ERR(od->base);
1671d88b1397SPeter Ujfalusi 
16724c74ecf7STony Lindgren 	conf = of_device_get_match_data(&pdev->dev);
1673211010aeSTony Lindgren 	if (conf) {
16744c74ecf7STony Lindgren 		od->cfg = conf;
1675211010aeSTony Lindgren 		od->plat = dev_get_platdata(&pdev->dev);
167699477263SColin Ian King 		if (!od->plat) {
167799477263SColin Ian King 			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
167899477263SColin Ian King 			return -ENODEV;
167999477263SColin Ian King 		}
168052ef8efcSArnd Bergmann 	} else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) {
16814c74ecf7STony Lindgren 		od->cfg = &default_cfg;
16824c74ecf7STony Lindgren 
1683d88b1397SPeter Ujfalusi 		od->plat = omap_get_plat_info();
1684d88b1397SPeter Ujfalusi 		if (!od->plat)
1685d88b1397SPeter Ujfalusi 			return -EPROBE_DEFER;
168652ef8efcSArnd Bergmann 	} else {
168752ef8efcSArnd Bergmann 		return -ENODEV;
1688211010aeSTony Lindgren 	}
1689d88b1397SPeter Ujfalusi 
1690d88b1397SPeter Ujfalusi 	od->reg_map = od->plat->reg_map;
1691d88b1397SPeter Ujfalusi 
1692d88b1397SPeter Ujfalusi 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
1693d88b1397SPeter Ujfalusi 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
1694d88b1397SPeter Ujfalusi 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
1695d88b1397SPeter Ujfalusi 	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
1696d88b1397SPeter Ujfalusi 	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
1697d88b1397SPeter Ujfalusi 	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
1698d88b1397SPeter Ujfalusi 	od->ddev.device_tx_status = omap_dma_tx_status;
1699d88b1397SPeter Ujfalusi 	od->ddev.device_issue_pending = omap_dma_issue_pending;
1700d88b1397SPeter Ujfalusi 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1701d88b1397SPeter Ujfalusi 	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1702d88b1397SPeter Ujfalusi 	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
1703d88b1397SPeter Ujfalusi 	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
1704d88b1397SPeter Ujfalusi 	od->ddev.device_config = omap_dma_slave_config;
1705d88b1397SPeter Ujfalusi 	od->ddev.device_pause = omap_dma_pause;
1706d88b1397SPeter Ujfalusi 	od->ddev.device_resume = omap_dma_resume;
1707d88b1397SPeter Ujfalusi 	od->ddev.device_terminate_all = omap_dma_terminate_all;
1708d88b1397SPeter Ujfalusi 	od->ddev.device_synchronize = omap_dma_synchronize;
1709d88b1397SPeter Ujfalusi 	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1710d88b1397SPeter Ujfalusi 	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1711d88b1397SPeter Ujfalusi 	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1712c9bd0946SJanusz Krzysztofik 	if (__dma_omap15xx(od->plat->dma_attr))
1713c9bd0946SJanusz Krzysztofik 		od->ddev.residue_granularity =
1714c9bd0946SJanusz Krzysztofik 				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1715c9bd0946SJanusz Krzysztofik 	else
1716d88b1397SPeter Ujfalusi 		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1717d88b1397SPeter Ujfalusi 	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
1718d88b1397SPeter Ujfalusi 	od->ddev.dev = &pdev->dev;
1719d88b1397SPeter Ujfalusi 	INIT_LIST_HEAD(&od->ddev.channels);
172061ecb539STony Lindgren 	mutex_init(&od->lch_lock);
1721d88b1397SPeter Ujfalusi 	spin_lock_init(&od->lock);
1722d88b1397SPeter Ujfalusi 	spin_lock_init(&od->irq_lock);
1723d88b1397SPeter Ujfalusi 
1724d88b1397SPeter Ujfalusi 	/* Number of DMA requests */
1725d88b1397SPeter Ujfalusi 	od->dma_requests = OMAP_SDMA_REQUESTS;
1726d88b1397SPeter Ujfalusi 	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
1727d88b1397SPeter Ujfalusi 						      "dma-requests",
1728d88b1397SPeter Ujfalusi 						      &od->dma_requests)) {
1729d88b1397SPeter Ujfalusi 		dev_info(&pdev->dev,
1730d88b1397SPeter Ujfalusi 			 "Missing dma-requests property, using %u.\n",
1731d88b1397SPeter Ujfalusi 			 OMAP_SDMA_REQUESTS);
1732d88b1397SPeter Ujfalusi 	}
1733d88b1397SPeter Ujfalusi 
1734d88b1397SPeter Ujfalusi 	/* Number of available logical channels */
1735d88b1397SPeter Ujfalusi 	if (!pdev->dev.of_node) {
17364c74ecf7STony Lindgren 		od->lch_count = od->plat->dma_attr->lch_count;
17374c74ecf7STony Lindgren 		if (unlikely(!od->lch_count))
17384c74ecf7STony Lindgren 			od->lch_count = OMAP_SDMA_CHANNELS;
1739d88b1397SPeter Ujfalusi 	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
17404c74ecf7STony Lindgren 					&od->lch_count)) {
1741d88b1397SPeter Ujfalusi 		dev_info(&pdev->dev,
1742d88b1397SPeter Ujfalusi 			 "Missing dma-channels property, using %u.\n",
1743d88b1397SPeter Ujfalusi 			 OMAP_SDMA_CHANNELS);
17444c74ecf7STony Lindgren 		od->lch_count = OMAP_SDMA_CHANNELS;
1745d88b1397SPeter Ujfalusi 	}
1746d88b1397SPeter Ujfalusi 
174761ecb539STony Lindgren 	/* Mask of allowed logical channels */
174861ecb539STony Lindgren 	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
174961ecb539STony Lindgren 						       "dma-channel-mask",
175061ecb539STony Lindgren 						       &val)) {
175161ecb539STony Lindgren 		/* Tag channels not in mask as reserved */
175261ecb539STony Lindgren 		val = ~val;
175361ecb539STony Lindgren 		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
175461ecb539STony Lindgren 	}
175561ecb539STony Lindgren 	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
175661ecb539STony Lindgren 		bitmap_set(od->lch_bitmap, 0, 2);
175761ecb539STony Lindgren 
17584c74ecf7STony Lindgren 	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
17594c74ecf7STony Lindgren 				   sizeof(*od->lch_map),
1760d88b1397SPeter Ujfalusi 				   GFP_KERNEL);
1761d88b1397SPeter Ujfalusi 	if (!od->lch_map)
1762d88b1397SPeter Ujfalusi 		return -ENOMEM;
1763d88b1397SPeter Ujfalusi 
1764d88b1397SPeter Ujfalusi 	for (i = 0; i < od->dma_requests; i++) {
1765d88b1397SPeter Ujfalusi 		rc = omap_dma_chan_init(od);
1766d88b1397SPeter Ujfalusi 		if (rc) {
1767d88b1397SPeter Ujfalusi 			omap_dma_free(od);
1768d88b1397SPeter Ujfalusi 			return rc;
1769d88b1397SPeter Ujfalusi 		}
1770d88b1397SPeter Ujfalusi 	}
1771d88b1397SPeter Ujfalusi 
1772d88b1397SPeter Ujfalusi 	irq = platform_get_irq(pdev, 1);
1773d88b1397SPeter Ujfalusi 	if (irq <= 0) {
1774d88b1397SPeter Ujfalusi 		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
1775d88b1397SPeter Ujfalusi 		od->legacy = true;
1776d88b1397SPeter Ujfalusi 	} else {
1777d88b1397SPeter Ujfalusi 		/* Disable all interrupts */
1778d88b1397SPeter Ujfalusi 		od->irq_enable_mask = 0;
1779d88b1397SPeter Ujfalusi 		omap_dma_glbl_write(od, IRQENABLE_L1, 0);
1780d88b1397SPeter Ujfalusi 
1781d88b1397SPeter Ujfalusi 		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
1782d88b1397SPeter Ujfalusi 				      IRQF_SHARED, "omap-dma-engine", od);
1783962411b0SWenwen Wang 		if (rc) {
1784962411b0SWenwen Wang 			omap_dma_free(od);
1785d88b1397SPeter Ujfalusi 			return rc;
1786d88b1397SPeter Ujfalusi 		}
1787962411b0SWenwen Wang 	}
1788d88b1397SPeter Ujfalusi 
1789d88b1397SPeter Ujfalusi 	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
1790d88b1397SPeter Ujfalusi 		od->ll123_supported = true;
1791d88b1397SPeter Ujfalusi 
1792d88b1397SPeter Ujfalusi 	od->ddev.filter.map = od->plat->slave_map;
1793d88b1397SPeter Ujfalusi 	od->ddev.filter.mapcnt = od->plat->slavecnt;
1794d88b1397SPeter Ujfalusi 	od->ddev.filter.fn = omap_dma_filter_fn;
1795d88b1397SPeter Ujfalusi 
1796d88b1397SPeter Ujfalusi 	if (od->ll123_supported) {
1797d88b1397SPeter Ujfalusi 		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
1798d88b1397SPeter Ujfalusi 						&pdev->dev,
1799d88b1397SPeter Ujfalusi 						sizeof(struct omap_type2_desc),
1800d88b1397SPeter Ujfalusi 						4, 0);
1801d88b1397SPeter Ujfalusi 		if (!od->desc_pool) {
1802d88b1397SPeter Ujfalusi 			dev_err(&pdev->dev,
1803d88b1397SPeter Ujfalusi 				"unable to allocate descriptor pool\n");
1804d88b1397SPeter Ujfalusi 			od->ll123_supported = false;
1805d88b1397SPeter Ujfalusi 		}
1806d88b1397SPeter Ujfalusi 	}
1807d88b1397SPeter Ujfalusi 
1808d88b1397SPeter Ujfalusi 	rc = dma_async_device_register(&od->ddev);
1809d88b1397SPeter Ujfalusi 	if (rc) {
1810d88b1397SPeter Ujfalusi 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
1811d88b1397SPeter Ujfalusi 			rc);
1812d88b1397SPeter Ujfalusi 		omap_dma_free(od);
1813d88b1397SPeter Ujfalusi 		return rc;
1814d88b1397SPeter Ujfalusi 	}
1815d88b1397SPeter Ujfalusi 
1816d88b1397SPeter Ujfalusi 	platform_set_drvdata(pdev, od);
1817d88b1397SPeter Ujfalusi 
1818d88b1397SPeter Ujfalusi 	if (pdev->dev.of_node) {
1819d88b1397SPeter Ujfalusi 		omap_dma_info.dma_cap = od->ddev.cap_mask;
1820d88b1397SPeter Ujfalusi 
1821d88b1397SPeter Ujfalusi 		/* Device-tree DMA controller registration */
1822d88b1397SPeter Ujfalusi 		rc = of_dma_controller_register(pdev->dev.of_node,
1823d88b1397SPeter Ujfalusi 				of_dma_simple_xlate, &omap_dma_info);
1824d88b1397SPeter Ujfalusi 		if (rc) {
1825d88b1397SPeter Ujfalusi 			pr_warn("OMAP-DMA: failed to register DMA controller\n");
1826d88b1397SPeter Ujfalusi 			dma_async_device_unregister(&od->ddev);
1827d88b1397SPeter Ujfalusi 			omap_dma_free(od);
1828d88b1397SPeter Ujfalusi 		}
1829d88b1397SPeter Ujfalusi 	}
1830d88b1397SPeter Ujfalusi 
18319938ee9cSTony Lindgren 	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
18329938ee9cSTony Lindgren 
1833f4cfa36dSTony Lindgren 	if (od->cfg->needs_busy_check) {
1834f4cfa36dSTony Lindgren 		od->nb.notifier_call = omap_dma_busy_notifier;
1835f4cfa36dSTony Lindgren 		cpu_pm_register_notifier(&od->nb);
1836f4cfa36dSTony Lindgren 	} else if (od->cfg->may_lose_context) {
18374c74ecf7STony Lindgren 		od->nb.notifier_call = omap_dma_context_notifier;
18384c74ecf7STony Lindgren 		cpu_pm_register_notifier(&od->nb);
18394c74ecf7STony Lindgren 	}
18404c74ecf7STony Lindgren 
1841d88b1397SPeter Ujfalusi 	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
1842d88b1397SPeter Ujfalusi 		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
1843d88b1397SPeter Ujfalusi 
1844d88b1397SPeter Ujfalusi 	return rc;
1845d88b1397SPeter Ujfalusi }
1846d88b1397SPeter Ujfalusi 
omap_dma_remove(struct platform_device * pdev)1847d88b1397SPeter Ujfalusi static int omap_dma_remove(struct platform_device *pdev)
1848d88b1397SPeter Ujfalusi {
1849d88b1397SPeter Ujfalusi 	struct omap_dmadev *od = platform_get_drvdata(pdev);
1850d88b1397SPeter Ujfalusi 	int irq;
1851d88b1397SPeter Ujfalusi 
18524c74ecf7STony Lindgren 	if (od->cfg->may_lose_context)
18534c74ecf7STony Lindgren 		cpu_pm_unregister_notifier(&od->nb);
18544c74ecf7STony Lindgren 
1855d88b1397SPeter Ujfalusi 	if (pdev->dev.of_node)
1856d88b1397SPeter Ujfalusi 		of_dma_controller_free(pdev->dev.of_node);
1857d88b1397SPeter Ujfalusi 
1858d88b1397SPeter Ujfalusi 	irq = platform_get_irq(pdev, 1);
1859d88b1397SPeter Ujfalusi 	devm_free_irq(&pdev->dev, irq, od);
1860d88b1397SPeter Ujfalusi 
1861d88b1397SPeter Ujfalusi 	dma_async_device_unregister(&od->ddev);
1862d88b1397SPeter Ujfalusi 
186352ef8efcSArnd Bergmann 	if (!omap_dma_legacy(od)) {
1864d88b1397SPeter Ujfalusi 		/* Disable all interrupts */
1865d88b1397SPeter Ujfalusi 		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
1866d88b1397SPeter Ujfalusi 	}
1867d88b1397SPeter Ujfalusi 
1868d88b1397SPeter Ujfalusi 	if (od->ll123_supported)
1869d88b1397SPeter Ujfalusi 		dma_pool_destroy(od->desc_pool);
1870d88b1397SPeter Ujfalusi 
1871d88b1397SPeter Ujfalusi 	omap_dma_free(od);
1872d88b1397SPeter Ujfalusi 
1873d88b1397SPeter Ujfalusi 	return 0;
1874d88b1397SPeter Ujfalusi }
1875d88b1397SPeter Ujfalusi 
18764c74ecf7STony Lindgren static const struct omap_dma_config omap2420_data = {
18774c74ecf7STony Lindgren 	.lch_end = CCFN,
18789938ee9cSTony Lindgren 	.rw_priority = true,
187961ecb539STony Lindgren 	.needs_lch_clear = true,
1880f4cfa36dSTony Lindgren 	.needs_busy_check = true,
18814c74ecf7STony Lindgren };
18824c74ecf7STony Lindgren 
18834c74ecf7STony Lindgren static const struct omap_dma_config omap2430_data = {
18844c74ecf7STony Lindgren 	.lch_end = CCFN,
18859938ee9cSTony Lindgren 	.rw_priority = true,
188661ecb539STony Lindgren 	.needs_lch_clear = true,
18874c74ecf7STony Lindgren };
18884c74ecf7STony Lindgren 
18894c74ecf7STony Lindgren static const struct omap_dma_config omap3430_data = {
18904c74ecf7STony Lindgren 	.lch_end = CCFN,
18919938ee9cSTony Lindgren 	.rw_priority = true,
189261ecb539STony Lindgren 	.needs_lch_clear = true,
18934c74ecf7STony Lindgren 	.may_lose_context = true,
18944c74ecf7STony Lindgren };
18954c74ecf7STony Lindgren 
18964c74ecf7STony Lindgren static const struct omap_dma_config omap3630_data = {
18974c74ecf7STony Lindgren 	.lch_end = CCDN,
18989938ee9cSTony Lindgren 	.rw_priority = true,
189961ecb539STony Lindgren 	.needs_lch_clear = true,
19004c74ecf7STony Lindgren 	.may_lose_context = true,
19014c74ecf7STony Lindgren };
19024c74ecf7STony Lindgren 
19034c74ecf7STony Lindgren static const struct omap_dma_config omap4_data = {
19044c74ecf7STony Lindgren 	.lch_end = CCDN,
19059938ee9cSTony Lindgren 	.rw_priority = true,
190661ecb539STony Lindgren 	.needs_lch_clear = true,
19074c74ecf7STony Lindgren };
19084c74ecf7STony Lindgren 
/* Device-tree match table mapping each supported SoC to its sDMA quirks */
static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
1918d88b1397SPeter Ujfalusi 
/*
 * Platform driver for the OMAP sDMA engine.  The name "omap-dma-engine"
 * also binds non-DT (legacy) platform devices; DT devices bind through
 * omap_dma_match above.
 */
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = omap_dma_match,
	},
};
1927d88b1397SPeter Ujfalusi 
omap_dma_filter_fn(struct dma_chan * chan,void * param)19289c71b9ebSArnd Bergmann static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
1929d88b1397SPeter Ujfalusi {
1930d88b1397SPeter Ujfalusi 	if (chan->device->dev->driver == &omap_dma_driver.driver) {
1931d88b1397SPeter Ujfalusi 		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1932d88b1397SPeter Ujfalusi 		struct omap_chan *c = to_omap_dma_chan(chan);
1933d88b1397SPeter Ujfalusi 		unsigned req = *(unsigned *)param;
1934d88b1397SPeter Ujfalusi 
1935d88b1397SPeter Ujfalusi 		if (req <= od->dma_requests) {
1936d88b1397SPeter Ujfalusi 			c->dma_sig = req;
1937d88b1397SPeter Ujfalusi 			return true;
1938d88b1397SPeter Ujfalusi 		}
1939d88b1397SPeter Ujfalusi 	}
1940d88b1397SPeter Ujfalusi 	return false;
1941d88b1397SPeter Ujfalusi }
1942d88b1397SPeter Ujfalusi 
/*
 * Register the driver at subsys_initcall time so DMA channels are
 * available before dependent client drivers probe.
 */
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
1948d88b1397SPeter Ujfalusi 
/* Module unload: unregister the platform driver (pairs with omap_dma_init) */
static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
1954d88b1397SPeter Ujfalusi 
1955d88b1397SPeter Ujfalusi MODULE_AUTHOR("Russell King");
1956d88b1397SPeter Ujfalusi MODULE_LICENSE("GPL");
1957