xref: /openbmc/linux/arch/arm/mach-omap2/dma.c (revision be1f9481)
/*
 * OMAP2+ DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/omap-dma.h>

#include "soc.h"
#include "omap_hwmod.h"
#include "omap_device.h"

#define OMAP2_DMA_STRIDE	0x60

static u32 errata;
static u8 dma_stride;

static struct omap_dma_dev_attr *d;

static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

static u16 reg_map[] = {
	[REVISION]		= 0x00,
	[GCR]			= 0x78,
	[IRQSTATUS_L0]		= 0x08,
	[IRQSTATUS_L1]		= 0x0c,
	[IRQSTATUS_L2]		= 0x10,
	[IRQSTATUS_L3]		= 0x14,
	[IRQENABLE_L0]		= 0x18,
	[IRQENABLE_L1]		= 0x1c,
	[IRQENABLE_L2]		= 0x20,
	[IRQENABLE_L3]		= 0x24,
	[SYSSTATUS]		= 0x28,
	[OCP_SYSCONFIG]		= 0x2c,
	[CAPS_0]		= 0x64,
	[CAPS_2]		= 0x6c,
	[CAPS_3]		= 0x70,
	[CAPS_4]		= 0x74,

	/* Common register offsets */
	[CCR]			= 0x80,
	[CLNK_CTRL]		= 0x84,
	[CICR]			= 0x88,
	[CSR]			= 0x8c,
	[CSDP]			= 0x90,
	[CEN]			= 0x94,
	[CFN]			= 0x98,
	[CSEI]			= 0xa4,
	[CSFI]			= 0xa8,
	[CDEI]			= 0xac,
	[CDFI]			= 0xb0,
	[CSAC]			= 0xb4,
	[CDAC]			= 0xb8,

	/* Channel specific register offsets */
	[CSSA]			= 0x9c,
	[CDSA]			= 0xa0,
	[CCEN]			= 0xbc,
	[CCFN]			= 0xc0,
	[COLOR]			= 0xc4,

	/* OMAP4 specific registers */
	[CDP]			= 0xd0,
	[CNDP]			= 0xd4,
	[CCDN]			= 0xd8,
};

static void __iomem *dma_base;
static inline void dma_write(u32 val, int reg, int lch)
{
	u8  stride;
	u32 offset;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	__raw_writel(val, dma_base + offset);
}

static inline u32 dma_read(int reg, int lch)
{
	u8 stride;
	u32 offset, val;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	val = __raw_readl(dma_base + offset);
	return val;
}
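
/*
 * Worked example (illustrative only): registers at or above
 * dma_common_ch_start (CSDP and later) are per-logical-channel and sit
 * dma_stride (OMAP2_DMA_STRIDE = 0x60) bytes apart, so the source start
 * address of logical channel 3 is accessed at
 *
 *	reg_map[CSSA] + 3 * 0x60 = 0x9c + 0x120 = 0x1bc
 *
 * i.e. dma_write(addr, CSSA, 3) and dma_read(CSSA, 3) both touch offset
 * 0x1bc from dma_base.  Registers below dma_common_ch_start (REVISION,
 * GCR, the IRQ and CAPS registers) are global; for those the stride is 0
 * and the lch argument is ignored.
 */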

static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;

	val = dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	dma_write(val, IRQENABLE_L0, lch);
}
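
/*
 * Sketch: IRQENABLE_L0 carries one enable bit per logical channel, so a
 * call such as omap2_disable_irq_lch(5) does a read-modify-write that
 * clears bit 5 and leaves the other channels' interrupts on line 0
 * untouched.  Only interrupt line 0 is managed here; lines 1-3 have their
 * own IRQENABLE_L1..L3 registers in reg_map[] above.
 */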

static void omap2_clear_dma(int lch)
{
	int i;

	for (i = dma_common_ch_start; i <= dma_common_ch_end; i++)
		dma_write(0, i, lch);
}

static void omap2_show_dma_caps(void)
{
	u8 revision = dma_read(REVISION, 0) & 0xff;

	pr_info("OMAP DMA hardware revision %d.%d\n",
		revision >> 4, revision & 0xf);
}
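
/*
 * Example: a REVISION register value of 0x52 is reported as
 * "OMAP DMA hardware revision 5.2" (major in the high nibble, minor in
 * the low nibble).
 */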

static u32 configure_dma_errata(void)
{
	/*
	 * Errata applicable for OMAP2430ES1.0 and all omap2420
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter-frame DMA buffering issue: DMA will wrongly buffer elements
	 * if packing and bursting is enabled. This might result in data
	 * getting stalled in the FIFO at the end of the block.
	 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set
	 * to guarantee no data will stay in the DMA FIFO in case inter-frame
	 * buffering occurs.
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel.
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled.
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before end of transfer.
	 * d. Channel i is re-enabled.
	 * e. Steps a. to d. are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {
		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA channel is not disabled
	 * after a transaction error.
	 * Workaround: SW should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish.
	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
	 * though FIFO draining is still in progress.
	 * Workaround: Put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88: Special programming model needed to disable DMA
	 * before end of block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01").
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves the IRQ status for channels 0 and 1
	 * uncleared after secure SRAM context save and restore.
	 * Workaround: manually clear those IRQs to avoid spurious
	 * interrupts. This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}
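
/*
 * Usage sketch (assumption: SET_DMA_ERRATA() and the DMA_ERRATA_x /
 * DMA_ROMCODE_BUG flags are the single-bit helpers declared alongside
 * struct omap_system_dma_plat_info in <linux/omap-dma.h>): each quirk ORs
 * one bit into the module-wide 'errata' word, and consumers of the
 * platform data test it roughly as
 *
 *	p->errata = configure_dma_errata();
 *	if (p->errata & DMA_ERRATA_i541)
 *		dma_force_nostandby();		(hypothetical helper)
 *
 * so a single u32 carries every workaround decision out of this file.
 */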

/* One-time initializations */
static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
{
	struct platform_device			*pdev;
	struct omap_system_dma_plat_info	*p;
	struct resource				*mem;
	char					*name = "omap_dma_system";

	dma_stride		= OMAP2_DMA_STRIDE;
	dma_common_ch_start	= CSDP;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		pr_err("%s: Unable to allocate pdata for %s:%s\n",
			__func__, name, oh->name);
		return -ENOMEM;
	}

	p->dma_attr		= (struct omap_dma_dev_attr *)oh->dev_attr;
	p->disable_irq_lch	= omap2_disable_irq_lch;
	p->show_dma_caps	= omap2_show_dma_caps;
	p->clear_dma		= omap2_clear_dma;
	p->dma_write		= dma_write;
	p->dma_read		= dma_read;

	p->clear_lch_regs	= NULL;

	p->errata		= configure_dma_errata();

	pdev = omap_device_build(name, 0, oh, p, sizeof(*p), NULL, 0, 0);
	kfree(p);
	if (IS_ERR(pdev)) {
		pr_err("%s: Can't build omap_device for %s:%s.\n",
			__func__, name, oh->name);
		return PTR_ERR(pdev);
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
		return -EINVAL;
	}
	dma_base = ioremap(mem->start, resource_size(mem));
	if (!dma_base) {
		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
		return -ENOMEM;
	}

	d = oh->dev_attr;
	d->chan = kcalloc(d->lch_count, sizeof(*d->chan), GFP_KERNEL);
	if (!d->chan) {
		dev_err(&pdev->dev, "%s: channel allocation failed\n",
			__func__);
		return -ENOMEM;
	}

	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		d->dev_caps |= HS_CHANNELS_RESERVED;

	/* Check the capabilities register for descriptor loading feature */
	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
		dma_common_ch_end = CCDN;
	else
		dma_common_ch_end = CCFN;

	return 0;
}
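
/*
 * Consumer-side sketch (assumption: the legacy DMA core in
 * arch/arm/plat-omap/dma.c probes the "omap_dma_system" device built above
 * and takes struct omap_system_dma_plat_info from its platform data): the
 * function pointers filled in by omap2_system_dma_init_dev() are the only
 * way that code touches the controller, roughly
 *
 *	struct omap_system_dma_plat_info *p = dev_get_platdata(&pdev->dev);
 *
 *	p->show_dma_caps();
 *	p->clear_dma(lch);
 *	csr = p->dma_read(CSR, lch);
 *
 * which keeps the OMAP2+ register layout and striding private to this file.
 */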

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};
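
/*
 * Client-side sketch (assumption: the "omap-dma-engine" device registered
 * by omap2_system_dma_init() below is bound by the omap-dma dmaengine
 * driver): peripheral drivers never call into this file directly, they go
 * through the generic dmaengine API, along the lines of
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,	(hypothetical FIFO address)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	if (!IS_ERR(chan))
 *		dmaengine_slave_config(chan, &cfg);
 */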

static int __init omap2_system_dma_init(void)
{
	struct platform_device *pdev;
	int res;

	res = omap_hwmod_for_each_by_class("dma",
			omap2_system_dma_init_dev, NULL);
	if (res)
		return res;

	pdev = platform_device_register_full(&omap_dma_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return res;
}
omap_arch_initcall(omap2_system_dma_init);