/*
 * linux/arch/arm/mach-omap2/io.c
 *
 * OMAP2 I/O mapping code
 *
 * Copyright (C) 2005 Nokia Corporation
 * Copyright (C) 2007-2009 Texas Instruments
 *
 * Author:
 *	Juha Yrjola <juha.yrjola@nokia.com>
 *	Syed Khasim <x0khasim@ti.com>
 *
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/omapfb.h>

#include <asm/tlb.h>

#include <asm/mach/map.h>

#include <plat/sram.h>
#include <plat/sdrc.h>
#include <plat/gpmc.h>
#include <plat/serial.h>

#include "clock2xxx.h"
#include "clock3xxx.h"
#include "clock44xx.h"
#include "io.h"

#include <plat/omap-pm.h>
#include "powerdomain.h"

#include "clockdomain.h"
#include <plat/omap_hwmod.h>
#include <plat/multi.h>

/*
 * The machine specific code may provide the extra mapping besides the
 * default mapping provided here.
 */

#ifdef CONFIG_ARCH_OMAP2
/* Static mappings shared by all OMAP2 chips: L3 and L4 interconnects */
static struct map_desc omap24xx_io_desc[] __initdata = {
	{
		.virtual	= L3_24XX_VIRT,
		.pfn		= __phys_to_pfn(L3_24XX_PHYS),
		.length		= L3_24XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_24XX_VIRT,
		.pfn		= __phys_to_pfn(L4_24XX_PHYS),
		.length		= L4_24XX_SIZE,
		.type		= MT_DEVICE
	},
};

#ifdef CONFIG_ARCH_OMAP2420
/* OMAP2420-only mappings: DSP memory, IPI and MMU regions */
static struct map_desc omap242x_io_desc[] __initdata = {
	{
		.virtual	= DSP_MEM_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MEM_2420_PHYS),
		.length		= DSP_MEM_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_IPI_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_IPI_2420_PHYS),
		.length		= DSP_IPI_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_MMU_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MMU_2420_PHYS),
		.length		= DSP_MMU_2420_SIZE,
		.type		= MT_DEVICE
	},
};

#endif

#ifdef CONFIG_ARCH_OMAP2430
/* OMAP2430-only mappings: L4 wakeup domain, GPMC, SDRC and SMS */
static struct map_desc omap243x_io_desc[] __initdata = {
	{
		.virtual	= L4_WK_243X_VIRT,
		.pfn		= __phys_to_pfn(L4_WK_243X_PHYS),
		.length		= L4_WK_243X_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_GPMC_PHYS),
		.length		= OMAP243X_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SDRC_PHYS),
		.length		= OMAP243X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SMS_PHYS),
		.length		= OMAP243X_SMS_SIZE,
		.type		= MT_DEVICE
	},
};
#endif
#endif

#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 static mappings: interconnects, GPMC, SMS, SDRC, PER and EMU */
static struct map_desc omap34xx_io_desc[] __initdata = {
	{
		.virtual	= L3_34XX_VIRT,
		.pfn		= __phys_to_pfn(L3_34XX_PHYS),
		.length		= L3_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP34XX_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP34XX_GPMC_PHYS),
		.length		= OMAP34XX_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SMS_PHYS),
		.length		= OMAP343X_SMS_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SDRC_PHYS),
		.length		= OMAP343X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_PER_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_34XX_PHYS),
		.length		= L4_PER_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_EMU_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_EMU_34XX_PHYS),
		.length		= L4_EMU_34XX_SIZE,
		.type		= MT_DEVICE
	},
#if defined(CONFIG_DEBUG_LL) &&			\
	(defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
	/* Early-debug UART mapping for Zoom2/Zoom3 boards (DEBUG_LL) */
	{
		.virtual	= ZOOM_UART_VIRT,
		.pfn		= __phys_to_pfn(ZOOM_UART_BASE),
		.length		= SZ_1M,
		.type		= MT_DEVICE
	},
#endif
};
#endif

#ifdef CONFIG_SOC_OMAPTI816X
/* TI816x reuses the OMAP3 L4 window only */
static struct map_desc omapti816x_io_desc[] __initdata = {
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	},
};
#endif

#ifdef CONFIG_ARCH_OMAP4
/* OMAP4 static mappings: L3/L4, GPMC, EMIF1/2, DMM, PER and EMU */
static struct map_desc omap44xx_io_desc[] __initdata = {
	{
		.virtual	= L3_44XX_VIRT,
		.pfn		= __phys_to_pfn(L3_44XX_PHYS),
		.length		= L3_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_44XX_PHYS),
		.length		= L4_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_GPMC_PHYS),
		.length		= OMAP44XX_GPMC_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_EMIF1_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
		.length		= OMAP44XX_EMIF1_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_EMIF2_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
		.length		= OMAP44XX_EMIF2_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= OMAP44XX_DMM_VIRT,
		.pfn		= __phys_to_pfn(OMAP44XX_DMM_PHYS),
		.length		= OMAP44XX_DMM_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_44XX_PHYS),
		.length		= L4_PER_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_EMU_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_EMU_44XX_PHYS),
		.length		= L4_EMU_44XX_SIZE,
		.type		= MT_DEVICE,
	},
};
#endif

/*
 * Chip-common tail of the map_io path: flush stale translations, then
 * detect the SoC revision and initialize SRAM.  Called by each of the
 * per-SoC *_map_common_io() functions below after their iotable_init().
 */
static void __init _omap2_map_common_io(void)
{
	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	omap2_check_revision();
	omap_sram_init();
}

#ifdef CONFIG_ARCH_OMAP2420
/* Install the OMAP2-common and 2420-specific static mappings */
void __init omap242x_map_common_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP2430
/* Install the OMAP2-common and 2430-specific static mappings */
void __init omap243x_map_common_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP3
/* Install the OMAP3 static mappings */
void __init omap34xx_map_common_io(void)
{
	iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_SOC_OMAPTI816X
/* Install the TI816x static mappings */
void __init omapti816x_map_common_io(void)
{
	iotable_init(omapti816x_io_desc, ARRAY_SIZE(omapti816x_io_desc));
	_omap2_map_common_io();
}
#endif

#ifdef CONFIG_ARCH_OMAP4
/* Install the OMAP4 static mappings */
void __init omap44xx_map_common_io(void)
{
	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
	_omap2_map_common_io();
}
#endif

/*
 * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
 *
 * Sets the CORE DPLL3 M2 divider to the same value that it's at
 * currently.  This has the effect of setting the SDRC SDRAM AC timing
 * registers to the values currently defined by the kernel.  Currently
 * only defined for OMAP3; will return 0 if called on OMAP2.  Returns
 * -EINVAL if the dpll3_m2_ck cannot be found, 0 if called on OMAP2,
 * or passes along the return value of clk_set_rate().
 */
static int __init _omap2_init_reprogram_sdrc(void)
{
	struct clk *dpll3_m2_ck;
	int v = -EINVAL;
	long rate;

	if (!cpu_is_omap34xx())
		return 0;

	dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
	if (IS_ERR(dpll3_m2_ck))
		return -EINVAL;

	rate = clk_get_rate(dpll3_m2_ck);
	pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
	/* Setting the rate to its current value forces the SDRC AC
	 * timing registers to be rewritten by the clock framework */
	v = clk_set_rate(dpll3_m2_ck, rate);
	if (v)
		pr_err("dpll3_m2_clk rate change failed: %d\n", v);

	clk_put(dpll3_m2_ck);

	return v;
}

/*
 * omap_hwmod_for_each() callback: apply the postsetup state passed
 * via @data (a u8 *) to the hwmod @oh.
 */
static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
{
	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}

/* Interrupt controller base, consumed by assembly entry macros */
void __iomem *omap_irq_base;

/*
 * Initialize asm_irq_base for entry-macro.S
 */
static inline void omap_irq_base_init(void)
{
	if (cpu_is_omap24xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
	else if (cpu_is_omap34xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
	else if (cpu_is_omap44xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
	else
		pr_err("Could not initialize omap_irq_base\n");
}

/*
 * Initialize the power management core data for the detected SoC:
 * powerdomains, clockdomains and hwmods, followed by the hwmod
 * postsetup states, early PM hooks, and the clock framework.
 */
void __init omap2_init_common_infrastructure(void)
{
	u8 postsetup_state;

	if (cpu_is_omap242x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2420_hwmod_init();
	} else if (cpu_is_omap243x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2430_hwmod_init();
	} else if (cpu_is_omap34xx()) {
		omap3xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap3xxx_hwmod_init();
	} else if (cpu_is_omap44xx()) {
		omap44xx_powerdomains_init();
		omap44xx_clockdomains_init();
		omap44xx_hwmod_init();
	} else {
		pr_err("Could not init hwmod data - unknown SoC\n");
	}

	/* Set the default postsetup state for all hwmods */
#ifdef CONFIG_PM_RUNTIME
	postsetup_state = _HWMOD_STATE_IDLE;
#else
	postsetup_state = _HWMOD_STATE_ENABLED;
#endif
	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);

	/*
	 * Set the default postsetup state for unusual modules (like
	 * MPU WDT).
	 *
	 * The postsetup_state is not actually used until
	 * omap_hwmod_late_init(), so boards that desire full watchdog
	 * coverage of kernel initialization can reprogram the
	 * postsetup_state between the calls to
	 * omap2_init_common_infra() and omap2_init_common_devices().
	 *
	 * XXX ideally we could detect whether the MPU WDT was currently
	 * enabled here and make this conditional
	 */
	postsetup_state = _HWMOD_STATE_DISABLED;
	omap_hwmod_for_each_by_class("wd_timer",
				     _set_hwmod_postsetup_state,
				     &postsetup_state);

	omap_pm_if_early_init();

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown SoC\n");
}

/*
 * Initialize common early devices: the SDRAM controller (on chips
 * that have one), GPMC, and the interrupt controller base address.
 * @sdrc_cs0/@sdrc_cs1 carry the board's SDRC timing parameters for
 * the two chip-selects; they are passed through to omap2_sdrc_init().
 */
void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
				      struct omap_sdrc_params *sdrc_cs1)
{
	if (cpu_is_omap24xx() || omap3_has_sdrc()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}
	gpmc_init();

	omap_irq_base_init();
}

/*
 * NOTE: Please use ioremap + __raw_read/write where possible instead of these
 */
434 u8 omap_readb(u32 pa) 435 { 436 return __raw_readb(OMAP2_L4_IO_ADDRESS(pa)); 437 } 438 EXPORT_SYMBOL(omap_readb); 439 440 u16 omap_readw(u32 pa) 441 { 442 return __raw_readw(OMAP2_L4_IO_ADDRESS(pa)); 443 } 444 EXPORT_SYMBOL(omap_readw); 445 446 u32 omap_readl(u32 pa) 447 { 448 return __raw_readl(OMAP2_L4_IO_ADDRESS(pa)); 449 } 450 EXPORT_SYMBOL(omap_readl); 451 452 void omap_writeb(u8 v, u32 pa) 453 { 454 __raw_writeb(v, OMAP2_L4_IO_ADDRESS(pa)); 455 } 456 EXPORT_SYMBOL(omap_writeb); 457 458 void omap_writew(u16 v, u32 pa) 459 { 460 __raw_writew(v, OMAP2_L4_IO_ADDRESS(pa)); 461 } 462 EXPORT_SYMBOL(omap_writew); 463 464 void omap_writel(u32 v, u32 pa) 465 { 466 __raw_writel(v, OMAP2_L4_IO_ADDRESS(pa)); 467 } 468 EXPORT_SYMBOL(omap_writel); 469