/* xref: /openbmc/linux/arch/arm/mach-omap2/io.c (revision 4800cd83) */
/*
 * linux/arch/arm/mach-omap2/io.c
 *
 * OMAP2 I/O mapping code
 *
 * Copyright (C) 2005 Nokia Corporation
 * Copyright (C) 2007-2009 Texas Instruments
 *
 * Author:
 *	Juha Yrjola <juha.yrjola@nokia.com>
 *	Syed Khasim <x0khasim@ti.com>
 *
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/omapfb.h>

#include <asm/tlb.h>

#include <asm/mach/map.h>

#include <plat/sram.h>
#include <plat/sdrc.h>
#include <plat/gpmc.h>
#include <plat/serial.h>

#include "clock2xxx.h"
#include "clock3xxx.h"
#include "clock44xx.h"
#include "io.h"

#include <plat/omap-pm.h>
#include "powerdomain.h"

#include "clockdomain.h"
#include <plat/omap_hwmod.h>
#include <plat/multi.h>

/*
 * The machine specific code may provide the extra mapping besides the
 * default mapping provided here.
 */
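
/*
 * Illustrative sketch only, not part of the original file: one way a
 * machine file can supply such an extra mapping is to declare its own
 * map_desc array and hand it to iotable_init() from its ->map_io() hook,
 * in addition to the common tables below.  The board_extra_io_desc name
 * and both addresses here are hypothetical.
 */
#if 0
static struct map_desc board_extra_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000,			/* hypothetical virtual base */
		.pfn		= __phys_to_pfn(0x48100000),	/* hypothetical physical base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};
#endif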

#ifdef CONFIG_ARCH_OMAP2
static struct map_desc omap24xx_io_desc[] __initdata = {
	{
		.virtual	= L3_24XX_VIRT,
		.pfn		= __phys_to_pfn(L3_24XX_PHYS),
		.length		= L3_24XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_24XX_VIRT,
		.pfn		= __phys_to_pfn(L4_24XX_PHYS),
		.length		= L4_24XX_SIZE,
		.type		= MT_DEVICE
	},
};

#ifdef CONFIG_ARCH_OMAP2420
static struct map_desc omap242x_io_desc[] __initdata = {
	{
		.virtual	= DSP_MEM_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MEM_2420_PHYS),
		.length		= DSP_MEM_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_IPI_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_IPI_2420_PHYS),
		.length		= DSP_IPI_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_MMU_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MMU_2420_PHYS),
		.length		= DSP_MMU_2420_SIZE,
		.type		= MT_DEVICE
	},
};

#endif

#ifdef CONFIG_ARCH_OMAP2430
static struct map_desc omap243x_io_desc[] __initdata = {
	{
		.virtual	= L4_WK_243X_VIRT,
		.pfn		= __phys_to_pfn(L4_WK_243X_PHYS),
		.length		= L4_WK_243X_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_GPMC_PHYS),
		.length		= OMAP243X_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SDRC_PHYS),
		.length		= OMAP243X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SMS_PHYS),
		.length		= OMAP243X_SMS_SIZE,
		.type		= MT_DEVICE
	},
};
#endif
#endif

#ifdef	CONFIG_ARCH_OMAP3
static struct map_desc omap34xx_io_desc[] __initdata = {
	{
		.virtual	= L3_34XX_VIRT,
		.pfn		= __phys_to_pfn(L3_34XX_PHYS),
		.length		= L3_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP34XX_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP34XX_GPMC_PHYS),
		.length		= OMAP34XX_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SMS_PHYS),
		.length		= OMAP343X_SMS_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SDRC_PHYS),
		.length		= OMAP343X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_PER_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_34XX_PHYS),
		.length		= L4_PER_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_EMU_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_EMU_34XX_PHYS),
		.length		= L4_EMU_34XX_SIZE,
		.type		= MT_DEVICE
	},
#if defined(CONFIG_DEBUG_LL) &&							\
	(defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
	{
		.virtual	= ZOOM_UART_VIRT,
		.pfn		= __phys_to_pfn(ZOOM_UART_BASE),
		.length		= SZ_1M,
		.type		= MT_DEVICE
	},
#endif
};
#endif
#ifdef	CONFIG_ARCH_OMAP4
static struct map_desc omap44xx_io_desc[] __initdata = {
	{
		.virtual	= L3_44XX_VIRT,
		.pfn		= __phys_to_pfn(L3_44XX_PHYS),
		.length		= L3_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_44XX_PHYS),
		.length		= L4_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_GPMC_PHYS),
		.length		= OMAP44XX_GPMC_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_EMIF1_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
		.length		= OMAP44XX_EMIF1_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_EMIF2_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
		.length		= OMAP44XX_EMIF2_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_DMM_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_DMM_PHYS),
		.length		= OMAP44XX_DMM_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_44XX_PHYS),
		.length		= L4_PER_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_EMU_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_EMU_44XX_PHYS),
		.length		= L4_EMU_44XX_SIZE,
		.type		= MT_DEVICE,
	},
};
#endif

static void __init _omap2_map_common_io(void)
{
	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	omap2_check_revision();
	omap_sram_init();
}

#ifdef CONFIG_ARCH_OMAP2420
void __init omap242x_map_common_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP2430
void __init omap243x_map_common_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP3
void __init omap34xx_map_common_io(void)
{
	iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP4
void __init omap44xx_map_common_io(void)
{
	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
	_omap2_map_common_io();
}
#endif
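
/*
 * Illustrative sketch only, not part of the original file: board code is
 * expected to call one of the *_map_common_io() helpers above from the
 * ->map_io() hook of its machine record, and it may chain an
 * iotable_init() call of its own for any extra static mappings.  The
 * board_map_io() and board_extra_io_desc names are hypothetical, and the
 * OMAP3 variant is just an example.
 */
#if 0
static void __init board_map_io(void)
{
	omap34xx_map_common_io();
	/* optional board-specific mappings, cf. the sketch near the top */
	iotable_init(board_extra_io_desc, ARRAY_SIZE(board_extra_io_desc));
}
#endif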

/*
 * _omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
 *
 * Sets the CORE DPLL3 M2 divider to the same value that it's at
 * currently.  This has the effect of setting the SDRC SDRAM AC timing
 * registers to the values currently defined by the kernel.  Currently
 * only defined for OMAP3.  Returns 0 if called on OMAP2, -EINVAL if
 * dpll3_m2_ck cannot be found, or the return value of clk_set_rate()
 * otherwise.
 */
static int __init _omap2_init_reprogram_sdrc(void)
{
	struct clk *dpll3_m2_ck;
	int v = -EINVAL;
	long rate;

	if (!cpu_is_omap34xx())
		return 0;

	dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
	if (IS_ERR(dpll3_m2_ck))
		return -EINVAL;

	rate = clk_get_rate(dpll3_m2_ck);
	pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
	v = clk_set_rate(dpll3_m2_ck, rate);
	if (v)
		pr_err("dpll3_m2_ck rate change failed: %d\n", v);

	clk_put(dpll3_m2_ck);

	return v;
}

static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
{
	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}

void __iomem *omap_irq_base;

/*
 * Initialize omap_irq_base for entry-macro.S
 */
static inline void omap_irq_base_init(void)
{
	if (cpu_is_omap24xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
	else if (cpu_is_omap34xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
	else if (cpu_is_omap44xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
	else
		pr_err("Could not initialize omap_irq_base\n");
}

void __init omap2_init_common_infrastructure(void)
{
	u8 postsetup_state;

	if (cpu_is_omap242x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2420_hwmod_init();
	} else if (cpu_is_omap243x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2430_hwmod_init();
	} else if (cpu_is_omap34xx()) {
		omap3xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap3xxx_hwmod_init();
	} else if (cpu_is_omap44xx()) {
		omap44xx_powerdomains_init();
		omap44xx_clockdomains_init();
		omap44xx_hwmod_init();
	} else {
		pr_err("Could not init hwmod data - unknown SoC\n");
	}

	/* Set the default postsetup state for all hwmods */
#ifdef CONFIG_PM_RUNTIME
	postsetup_state = _HWMOD_STATE_IDLE;
#else
	postsetup_state = _HWMOD_STATE_ENABLED;
#endif
	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);

	/*
	 * Set the default postsetup state for unusual modules (like
	 * MPU WDT).
	 *
	 * The postsetup_state is not actually used until
	 * omap_hwmod_late_init(), so boards that desire full watchdog
	 * coverage of kernel initialization can reprogram the
	 * postsetup_state between the calls to
	 * omap2_init_common_infrastructure() and
	 * omap2_init_common_devices(); see the illustrative sketch after
	 * omap2_init_common_devices() below.
	 *
	 * XXX ideally we could detect whether the MPU WDT was currently
	 * enabled here and make this conditional
	 */
	postsetup_state = _HWMOD_STATE_DISABLED;
	omap_hwmod_for_each_by_class("wd_timer",
				     _set_hwmod_postsetup_state,
				     &postsetup_state);

	omap_pm_if_early_init();

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown SoC\n");
}

void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
				      struct omap_sdrc_params *sdrc_cs1)
{
	omap_serial_early_init();

	omap_hwmod_late_init();

	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}
	gpmc_init();

	omap_irq_base_init();
}
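
/*
 * Illustrative sketch only, not part of the original file: a board's
 * early setup code would typically call the two functions above in this
 * order, optionally reprogramming the watchdog hwmod's postsetup state
 * in between, as described in omap2_init_common_infrastructure().  The
 * board_init_common() name and the board_sdrc_params_cs0 table are
 * hypothetical; "wd_timer2" is the OMAP3 MPU watchdog hwmod name.
 */
#if 0
static void __init board_init_common(void)
{
	struct omap_hwmod *oh;

	omap2_init_common_infrastructure();

	/* keep the MPU watchdog enabled across kernel init on this board */
	oh = omap_hwmod_lookup("wd_timer2");
	if (oh)
		omap_hwmod_set_postsetup_state(oh, _HWMOD_STATE_ENABLED);

	omap2_init_common_devices(board_sdrc_params_cs0, NULL);
}
#endif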

/*
 * NOTE: Please use ioremap + __raw_read/write where possible instead of these
 */

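/*
 * Illustrative sketch only, not part of the original file: the preferred
 * pattern from the note above is a driver-private ioremap() of the block
 * a driver owns, accessed via __raw_readl()/__raw_writel(), instead of
 * the global omap_readl()/omap_writel() helpers below.  The physical
 * base and the register offset are hypothetical.
 */
#if 0
static void __iomem *example_base;

static int __init example_setup(void)
{
	example_base = ioremap(0x48002000, SZ_4K);	/* hypothetical block */
	if (!example_base)
		return -ENOMEM;

	/* read-modify-write a hypothetical register at offset 0x10 */
	__raw_writel(__raw_readl(example_base + 0x10) | 0x1,
		     example_base + 0x10);
	return 0;
}

static void example_teardown(void)
{
	iounmap(example_base);
}
#endif
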
u8 omap_readb(u32 pa)
{
	return __raw_readb(OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readb);

u16 omap_readw(u32 pa)
{
	return __raw_readw(OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readw);

u32 omap_readl(u32 pa)
{
	return __raw_readl(OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readl);

void omap_writeb(u8 v, u32 pa)
{
	__raw_writeb(v, OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writeb);

void omap_writew(u16 v, u32 pa)
{
	__raw_writew(v, OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writew);

void omap_writel(u32 v, u32 pa)
{
	__raw_writel(v, OMAP2_L4_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writel);