/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
 * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * The CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
 * because the mode is not supported by hw constraints of dormant
 * mode. While waking up from the dormant mode, a reset signal
 * to the Cortex-A9 processor must be asserted by the external
 * power controller.
 *
 * With architectural inputs and hardware recommendations, only
 * below modes are supported from power gain vs latency point of view.
 *
 *	CPU0		CPU1		MPUSS
 *	----------------------------------------------
 *	ON		ON		ON
 *	ON(Inactive)	OFF		ON(Inactive)
 *	OFF		OFF		CSWR
 *	OFF		OFF		OSWR
 *	OFF		OFF		OFF(Device OFF *TBD)
 *	----------------------------------------------
 *
 * Note: CPU0 is the master core and it is the last CPU to go down
 * and the first to wake up when MPUSS low power states are exercised
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>
#include <asm/virt.h>
#include <asm/hardware/cache-l2x0.h>

#include "soc.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#include "pm.h"
#include "prcm_mpu44xx.h"
#include "prcm_mpu54xx.h"
#include "prminst44xx.h"
#include "prcm44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"

/* Base of the SAR RAM scratchpad, set up in omap4_mpuss_early_init() */
static void __iomem *sar_base;
/* CPU1 wakeup address found in SAR RAM at early init, kept for later checks */
static u32 old_cpu1_ns_pa_addr;

#if defined(CONFIG_PM) && defined(CONFIG_SMP)

/* Per-CPU bookkeeping for low power state programming */
struct omap4_cpu_pm_info {
	struct powerdomain *pwrdm;	/* CPUx power domain */
	void __iomem *scu_sar_addr;	/* SAR slot for the SCU power status */
	void __iomem *wkup_sar_addr;	/* SAR slot for the wakeup address */
	void __iomem *l2x0_sar_addr;	/* SAR slot for the L2 cache state */
};

/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend:	CPU suspend finisher function pointer
 * @resume:		CPU resume function pointer
 * @scu_prepare:	CPU Snoop Control program function pointer
 * @hotplug_restart:	CPU restart function pointer
 *
 * Structure holds functions pointer for CPU low power operations like
 * suspend, resume and scu programming.
 */
struct cpu_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
	void (*hotplug_restart)(void);
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
static u32 cpu_context_offset;

/* Default suspend finisher: plain WFI, no context save */
static int default_finish_suspend(unsigned long cpu_state)
{
	omap_do_wfi();
	return 0;
}

static void dummy_cpu_resume(void)
{}

static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}

/*
 * Safe defaults; the SoC specific handlers are installed in
 * omap4_mpuss_init().
 */
static struct cpu_pm_ops omap_pm_ops = {
	.finish_suspend		= default_finish_suspend,
	.resume			= dummy_cpu_resume,
	.scu_prepare		= dummy_scu_prepare,
	.hotplug_restart	= dummy_cpu_resume,
};

/*
 * Program the wakeup routine address for the CPU0 and CPU1
 * used for OFF or DORMANT wakeup.
 */
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	if (pm_info->wkup_sar_addr)
		writel_relaxed(addr, pm_info->wkup_sar_addr);
}

/*
 * Store the SCU power status value to scratchpad memory
 */
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
	u32 scu_pwr_st;

	/* Map the powerdomain target state onto the SCU power status value */
	switch (cpu_state) {
	case PWRDM_POWER_RET:
		scu_pwr_st = SCU_PM_DORMANT;
		break;
	case PWRDM_POWER_OFF:
		scu_pwr_st = SCU_PM_POWEROFF;
		break;
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
	default:
		scu_pwr_st = SCU_PM_NORMAL;
		break;
	}

	if (pm_info->scu_sar_addr)
		writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
}

/* Helper functions for MPUSS OSWR */
static inline void mpuss_clear_prev_logic_pwrst(void)
{
	u32 reg;

	/* Context registers are clear-on-write: read and write back to clear */
	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
	omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
}

/* Clear the previous logic state for CPU0 or CPU1 (read-then-write-back) */
static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
{
	u32 reg;

	if (cpu_id) {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
					cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
					cpu_context_offset);
	} else {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
					cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
					cpu_context_offset);
	}
}

/*
 * Store the CPU cluster state for L2X0 low power operations.
 */
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	if (pm_info->l2x0_sar_addr)
		writel_relaxed(save_state, pm_info->l2x0_sar_addr);
}

/*
 * Save the L2X0 AUXCTRL and POR value to SAR memory. It's used in
 * every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
	void __iomem *l2x0_base = omap4_get_l2cache_base();

	if (l2x0_base && sar_base) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       sar_base + L2X0_AUXCTRL_OFFSET);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
	}
}
#else
static void __init save_l2x0_context(void)
{}
#endif

/* Return the CPU1 wakeup address that was in SAR RAM before early init */
u32 omap4_get_cpu1_ns_pa_addr(void)
{
	return old_cpu1_ns_pa_addr;
}

/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * The purpose of this function is to manage low power programming
 * of OMAP4 MPUSS subsystem
 * @cpu : CPU ID
 * @power_state: Low power state.
227 * 228 * MPUSS states for the context save: 229 * save_state = 230 * 0 - Nothing lost and no need to save: MPUSS INACTIVE 231 * 1 - CPUx L1 and logic lost: MPUSS CSWR 232 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR 233 * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF 234 */ 235 int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) 236 { 237 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 238 unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET; 239 unsigned int wakeup_cpu; 240 241 if (omap_rev() == OMAP4430_REV_ES1_0) 242 return -ENXIO; 243 244 switch (power_state) { 245 case PWRDM_POWER_ON: 246 case PWRDM_POWER_INACTIVE: 247 save_state = 0; 248 break; 249 case PWRDM_POWER_OFF: 250 cpu_logic_state = PWRDM_POWER_OFF; 251 save_state = 1; 252 break; 253 case PWRDM_POWER_RET: 254 if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) 255 save_state = 0; 256 break; 257 default: 258 /* 259 * CPUx CSWR is invalid hardware state. Also CPUx OSWR 260 * doesn't make much scense, since logic is lost and $L1 261 * needs to be cleaned because of coherency. This makes 262 * CPUx OSWR equivalent to CPUX OFF and hence not supported 263 */ 264 WARN_ON(1); 265 return -ENXIO; 266 } 267 268 pwrdm_pre_transition(NULL); 269 270 /* 271 * Check MPUSS next state and save interrupt controller if needed. 272 * In MPUSS OSWR or device OFF, interrupt controller contest is lost. 273 */ 274 mpuss_clear_prev_logic_pwrst(); 275 if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) && 276 (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF)) 277 save_state = 2; 278 279 cpu_clear_prev_logic_pwrst(cpu); 280 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 281 pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state); 282 set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume)); 283 omap_pm_ops.scu_prepare(cpu, power_state); 284 l2x0_pwrst_prepare(cpu, save_state); 285 286 /* 287 * Call low level function with targeted low power state. 
288 */ 289 if (save_state) 290 cpu_suspend(save_state, omap_pm_ops.finish_suspend); 291 else 292 omap_pm_ops.finish_suspend(save_state); 293 294 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu) 295 gic_dist_enable(); 296 297 /* 298 * Restore the CPUx power state to ON otherwise CPUx 299 * power domain can transitions to programmed low power 300 * state while doing WFI outside the low powe code. On 301 * secure devices, CPUx does WFI which can result in 302 * domain transition 303 */ 304 wakeup_cpu = smp_processor_id(); 305 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 306 307 pwrdm_post_transition(NULL); 308 309 return 0; 310 } 311 312 /** 313 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry 314 * @cpu : CPU ID 315 * @power_state: CPU low power state. 316 */ 317 int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 318 { 319 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 320 unsigned int cpu_state = 0; 321 322 if (omap_rev() == OMAP4430_REV_ES1_0) 323 return -ENXIO; 324 325 /* Use the achievable power state for the domain */ 326 power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm, 327 false, power_state); 328 329 if (power_state == PWRDM_POWER_OFF) 330 cpu_state = 1; 331 332 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 333 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 334 set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart)); 335 omap_pm_ops.scu_prepare(cpu, power_state); 336 337 /* 338 * CPU never retuns back if targeted power state is OFF mode. 339 * CPU ONLINE follows normal CPU ONLINE ptah via 340 * omap4_secondary_startup(). 341 */ 342 omap_pm_ops.finish_suspend(cpu_state); 343 344 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 345 return 0; 346 } 347 348 349 /* 350 * Enable Mercury Fast HG retention mode by default. 
 */
static void enable_mercury_retention_mode(void)
{
	u32 reg;

	reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
			OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
	/* Enable HG_EN, HG_RAMPUP = fast mode */
	reg |= BIT(24) | BIT(25);
	omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
			OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
}

/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	/* Initialise per-CPU PM information: CPU0 first */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
		/* The wakeup-address SAR slot differs between OMAP4 and OMAP5 */
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
					CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
					OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	/* Same setup for CPU1 */
	pm_info = &per_cpu(omap4_pm_info, 0x1);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
					CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
					OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	}

	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	if (sar_base) {
		/* Save device type on scratchpad for low level code to use */
		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
			       sar_base + OMAP_TYPE_OFFSET);
		save_l2x0_context();
	}

	/* Install the SoC specific low power handlers */
	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	/* OMAP4460 needs its own restart entry point */
	if (cpu_is_omap446x())
		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

	return 0;
}

#endif

/*
 * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
 * current kernel's secondary_startup() early before
 * clockdomains_init(). Otherwise clockdomain_init() can
 * wake CPU1 and cause a hang.
465 */ 466 void __init omap4_mpuss_early_init(void) 467 { 468 unsigned long startup_pa; 469 void __iomem *ns_pa_addr; 470 471 if (!(soc_is_omap44xx() || soc_is_omap54xx())) 472 return; 473 474 sar_base = omap4_get_sar_ram_base(); 475 476 /* Save old NS_PA_ADDR for validity checks later on */ 477 if (soc_is_omap44xx()) 478 ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 479 else 480 ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 481 old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr); 482 483 if (soc_is_omap443x()) 484 startup_pa = __pa_symbol(omap4_secondary_startup); 485 else if (soc_is_omap446x()) 486 startup_pa = __pa_symbol(omap4460_secondary_startup); 487 else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 488 startup_pa = __pa_symbol(omap5_secondary_hyp_startup); 489 else 490 startup_pa = __pa_symbol(omap5_secondary_startup); 491 492 if (soc_is_omap44xx()) 493 writel_relaxed(startup_pa, sar_base + 494 CPU1_WAKEUP_NS_PA_ADDR_OFFSET); 495 else 496 writel_relaxed(startup_pa, sar_base + 497 OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET); 498 } 499