// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 */

#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/omap-gpmc.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
			 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during the boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
					    OMAP3_SAVE_SECURE_RAM_SZ);
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* The following is for error tracking; it should not happen */
		if (ret) {
			pr_err("save_secure_sram() returns %08x\n", ret);
			while (1)
				;
		}
	}
}

static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
	 * these are handled in a separate handler to avoid acking
	 * IO events before they are parsed in the mux code.
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}

static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}

static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}

void omap_sram_idle(void)
{
	/*
	 * save_state tells what needs to be saved and restored
	 * in omap_sram_idle:
	 * save_state = 0 => nothing to save and restore
	 * save_state = 1 => only L1 and logic lost
	 * save_state = 2 => only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	u32 sdrc_pwr = 0;
	int error;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Read the next power states programmed for the PER and CORE domains */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF) {
		error = cpu_cluster_pm_enter();
		if (error)
			return;
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * gets saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In the off-mode resume path above, omap3_core_restore_context()
		 * also handles the INTC autoidle restore done here, so limit
		 * this to non-off-mode resume paths to avoid doing it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_exit();
}

static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	omap_sram_idle();
}

#ifdef CONFIG_SUSPEND
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set the ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

static void __init prcm_setup_regs(void)
{
	omap3_ctrl_init();

	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}

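/*
 * omap3_pm_off_mode_enable - program the deepest allowed powerdomain state
 * @enable: non-zero to target OFF for all powerdomains, zero to target RET
 *
 * Note that when erratum i583 applies, the CORE powerdomain is kept in
 * retention even if OFF is requested.
 */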
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
		    pwrst->pwrdm == core_pwrdm &&
		    state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for the erratum i581 workaround
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}

static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}

int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/*
	 * XXX prcm_setup_regs needs to run before enabling hw-supervised
	 * mode for powerdomains.
	 */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
			  _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup",
			  NULL);
	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
			  _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND,
			  "pm_io", omap3_pm_init);
	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

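	/*
	 * Cache the powerdomain and clockdomain handles used by the idle
	 * path and by the erratum workarounds below.
	 */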
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608.
	 * It is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR when the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
	 * complete workaround. The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF. And if the PER last
	 * power state was OFF while the CORE last power state was ON,
	 * the UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		local_irq_disable();

		omap3_save_secure_ram_context();

		local_irq_enable();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}