/*
 * arch/arm/mach-at91/pm.c
 * AT91 Power Management
 *
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/parser.h>
#include <linux/suspend.h>

#include <linux/clk/at91_pmc.h>

#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
#include <asm/suspend.h>

#include "generic.h"
#include "pm.h"

/*
 * FIXME: this is needed to communicate between the pinctrl driver and
 * the PM implementation in the machine. Possibly part of the PM
 * implementation should be moved down into the pinctrl driver and get
 * called as part of the generic suspend/resume path.
 */
#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#endif

static const match_table_t pm_modes __initconst = {
	{ 0, "standby" },
	{ AT91_PM_SLOW_CLOCK, "ulp0" },
	{ AT91_PM_BACKUP, "backup" },
	{ -1, NULL },
};

static struct at91_pm_data pm_data = {
	.standby_mode = 0,
	.suspend_mode = AT91_PM_SLOW_CLOCK,
};

#define at91_ramc_read(id, field) \
	__raw_readl(pm_data.ramc[id] + field)

#define at91_ramc_write(id, field, value) \
	__raw_writel(value, pm_data.ramc[id] + field)

static int at91_pm_valid_state(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;

	default:
		return 0;
	}
}

static int canary = 0xA5A5A5A5;

static struct at91_pm_bu {
	int suspended;
	unsigned long reserved;
	phys_addr_t canary;
	phys_addr_t resume;
} *pm_bu;

/*
 * Called after processes are frozen, but before we shutdown devices.
 */
static int at91_pm_begin(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
		pm_data.mode = pm_data.suspend_mode;
		break;

	case PM_SUSPEND_STANDBY:
		pm_data.mode = pm_data.standby_mode;
		break;

	default:
		pm_data.mode = -1;
	}

	return 0;
}

/*
 * Verify that all the clocks are correct before entering
 * slow-clock mode.
 */
static int at91_pm_verify_clocks(void)
{
	unsigned long scsr;
	int i;

	scsr = readl(pm_data.pmc + AT91_PMC_SCSR);

	/* USB must not be using PLLB */
	if ((scsr & pm_data.uhp_udp_mask) != 0) {
		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
		return 0;
	}

	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
	for (i = 0; i < 4; i++) {
		u32 css;

		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
			continue;
		css = readl(pm_data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
		if (css != AT91_PMC_CSS_SLOW) {
			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
			return 0;
		}
	}

	return 1;
}

/*
 * Call this from platform driver suspend() to see how deeply to suspend.
 * For example, some controllers (like OHCI) need one of the PLL clocks
 * in order to act as a wakeup source, and those are not available when
 * going into slow clock mode.
 *
 * REVISIT: generalize as clk_will_be_available(clk)? Other platforms have
 * the very same problem (but not using at91 main_clk), and it'd be better
 * to add one generic API rather than lots of platform-specific ones.
 */
int at91_suspend_entering_slow_clock(void)
{
	return (pm_data.mode >= AT91_PM_SLOW_CLOCK);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
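
/*
 * A typical caller is a USB host controller driver's suspend() hook, along
 * the lines of this hypothetical sketch (the real driver code lives
 * elsewhere, e.g. in the OHCI glue):
 *
 *	if (at91_suspend_entering_slow_clock())
 *		...	the PLLs are going away: give up remote wakeup
 *	else
 *		...	clocks keep running: stay armed as a wakeup source
 */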
static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
extern u32 at91_pm_suspend_in_sram_sz;

static int at91_suspend_finish(unsigned long val)
{
	flush_cache_all();
	outer_disable();

	at91_suspend_sram_fn(&pm_data);

	return 0;
}

static void at91_pm_suspend(suspend_state_t state)
{
	if (pm_data.mode == AT91_PM_BACKUP) {
		pm_bu->suspended = 1;

		cpu_suspend(0, at91_suspend_finish);

		/* The SRAM is lost between suspend cycles */
		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
					     &at91_pm_suspend_in_sram,
					     at91_pm_suspend_in_sram_sz);
	} else {
		at91_suspend_finish(0);
	}

	outer_resume();
}

/*
 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
 * event sources; and reduces DRAM power. But otherwise it's identical to
 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 *
 * AT91_PM_SLOW_CLOCK is like STANDBY plus slow clock mode: drivers must
 * suspend more deeply, the master clock switches to clk32k, and the main
 * oscillator is turned off.
 *
 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
 */
static int at91_pm_enter(suspend_state_t state)
{
#ifdef CONFIG_PINCTRL_AT91
	at91_pinctrl_gpio_suspend();
#endif

	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		/*
		 * Ensure that clocks are in a valid state.
		 */
		if ((pm_data.mode >= AT91_PM_SLOW_CLOCK) &&
		    !at91_pm_verify_clocks())
			goto error;

		at91_pm_suspend(state);

		break;

	case PM_SUSPEND_ON:
		cpu_do_idle();
		break;

	default:
		pr_debug("AT91: PM - bogus suspend state %d\n", state);
		goto error;
	}

error:
#ifdef CONFIG_PINCTRL_AT91
	at91_pinctrl_gpio_resume();
#endif
	return 0;
}

/*
 * Called right before thawing processes.
 */
static void at91_pm_end(void)
{
}

static const struct platform_suspend_ops at91_pm_ops = {
	.valid	= at91_pm_valid_state,
	.begin	= at91_pm_begin,
	.enter	= at91_pm_enter,
	.end	= at91_pm_end,
};

static struct platform_device at91_cpuidle_device = {
	.name = "cpuidle-at91",
};
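
/*
 * The standby helpers below are what at91_dt_ramc() installs as platform
 * data for the "cpuidle-at91" device: each puts the RAM controller(s) into
 * self-refresh (or power-down) around a wait-for-interrupt.
 */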
/*
 * The AT91RM9200 goes into self-refresh mode with this command, and will
 * terminate self-refresh automatically on the next SDRAM access.
 *
 * Self-refresh mode is exited as soon as a memory access is made, but we don't
 * know for sure when that happens. However, we need to restore the low-power
 * mode if it was enabled before going idle. Restoring low-power mode while
 * still in self-refresh is "not recommended", but seems to work.
 */
static void at91rm9200_standby(void)
{
	asm volatile(
		"b    1f\n\t"
		".align 5\n\t"
		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
		"    str    %2, [%1, %3]\n\t"
		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
		:
		: "r" (0), "r" (pm_data.ramc[0]),
		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}

/* We manage both DDRAM/SDRAM controllers, we need more than one value to
 * remember.
 */
static void at91_ddr_standby(void)
{
	/* Those two values allow us to delay self-refresh activation
	 * to the maximum. */
	u32 lpr0, lpr1 = 0;
	u32 mdr, saved_mdr0, saved_mdr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	/* LPDDR1 --> force DDR2 mode during self-refresh */
	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
		mdr |= AT91_DDRSDRC_MD_DDR2;
		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
	}

	if (pm_data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
			mdr |= AT91_DDRSDRC_MD_DDR2;
			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
		}
	}

	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
	if (pm_data.ramc[1])
		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);

	cpu_do_idle();

	/* restore the original mode and low-power settings per controller */
	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
	if (pm_data.ramc[1]) {
		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
	}
}

static void sama5d3_ddr_standby(void)
{
	u32 lpr0;
	u32 saved_lpr0;

	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;

	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);

	cpu_do_idle();

	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
}
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
 * remember.
 */
static void at91sam9_sdram_standby(void)
{
	u32 lpr0, lpr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	if (pm_data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
	}

	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
	if (pm_data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);

	cpu_do_idle();

	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
	if (pm_data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}

struct ramc_info {
	void (*idle)(void);
	unsigned int memctrl;
};

static const struct ramc_info ramc_infos[] __initconst = {
	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
};

static const struct of_device_id ramc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
	{ /*sentinel*/ }
};

static __init void at91_dt_ramc(void)
{
	struct device_node *np;
	const struct of_device_id *of_id;
	int idx = 0;
	void *standby = NULL;
	const struct ramc_info *ramc;

	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
		pm_data.ramc[idx] = of_iomap(np, 0);
		if (!pm_data.ramc[idx])
			panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);

		ramc = of_id->data;
		if (!standby)
			standby = ramc->idle;
		pm_data.memctrl = ramc->memctrl;

		idx++;
	}

	if (!idx)
		panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));

	if (!standby) {
		pr_warn("ramc no standby function available\n");
		return;
	}

	at91_cpuidle_device.dev.platform_data = standby;
}
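
/*
 * The SoC-specific idle callbacks below are handed to at91_pm_init(), which
 * installs them as arm_pm_idle; both gate the processor clock through the
 * PMC System Clock Disable Register.
 */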
static void at91rm9200_idle(void)
{
	/*
	 * Disable the processor clock. The processor will be automatically
	 * re-enabled by an interrupt or by a reset.
	 */
	writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
}

static void at91sam9_idle(void)
{
	writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
	cpu_do_idle();
}

static void __init at91_pm_sram_init(void)
{
	struct gen_pool *sram_pool;
	phys_addr_t sram_pbase;
	unsigned long sram_base;
	struct device_node *node;
	struct platform_device *pdev = NULL;

	for_each_compatible_node(node, NULL, "mmio-sram") {
		pdev = of_find_device_by_node(node);
		if (pdev) {
			of_node_put(node);
			break;
		}
	}

	if (!pdev) {
		pr_warn("%s: failed to find sram device!\n", __func__);
		return;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: sram pool unavailable!\n", __func__);
		return;
	}

	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
	if (!sram_base) {
		pr_warn("%s: unable to alloc sram!\n", __func__);
		return;
	}

	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
					at91_pm_suspend_in_sram_sz, false);
	if (!at91_suspend_sram_fn) {
		pr_warn("SRAM: Could not map\n");
		return;
	}

	/* Copy the pm suspend handler to SRAM */
	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
}

static void __init at91_pm_backup_init(void)
{
	struct gen_pool *sram_pool;
	struct device_node *np;
	struct platform_device *pdev = NULL;

	if ((pm_data.standby_mode != AT91_PM_BACKUP) &&
	    (pm_data.suspend_mode != AT91_PM_BACKUP))
		return;

	pm_bu = NULL;

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
	if (!np) {
		pr_warn("%s: failed to find shdwc!\n", __func__);
		return;
	}

	pm_data.shdwc = of_iomap(np, 0);
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
	if (!np) {
		pr_warn("%s: failed to find sfrbu!\n", __func__);
		goto sfrbu_fail;
	}

	pm_data.sfrbu = of_iomap(np, 0);
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
	if (!np)
		goto securam_fail;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		pr_warn("%s: failed to find securam device!\n", __func__);
		goto securam_fail;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: securam pool unavailable!\n", __func__);
		goto securam_fail;
	}

	pm_bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
	if (!pm_bu) {
		pr_warn("%s: unable to alloc securam!\n", __func__);
		goto securam_fail;
	}

	pm_bu->suspended = 0;
	pm_bu->canary = __pa_symbol(&canary);
	pm_bu->resume = __pa_symbol(cpu_resume);

	return;

securam_fail:
	iounmap(pm_data.sfrbu);
	pm_data.sfrbu = NULL;
sfrbu_fail:
	iounmap(pm_data.shdwc);
	pm_data.shdwc = NULL;

	/* fall back to slow-clock mode if backup mode cannot be used */
	if (pm_data.standby_mode == AT91_PM_BACKUP)
		pm_data.standby_mode = AT91_PM_SLOW_CLOCK;
	if (pm_data.suspend_mode == AT91_PM_BACKUP)
		pm_data.suspend_mode = AT91_PM_SLOW_CLOCK;
}
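
/*
 * Per-PMC-variant data: the USB host/device port clock bits differ between
 * SoC families, and at91_pm_verify_clocks() checks them against PMC_SCSR
 * before allowing a slow-clock suspend.
 */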
struct pmc_info {
	unsigned long uhp_udp_mask;
};

static const struct pmc_info pmc_infos[] __initconst = {
	{ .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP },
	{ .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP },
	{ .uhp_udp_mask = AT91SAM926x_PMC_UHP },
};

static const struct of_device_id atmel_pmc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
	{ /* sentinel */ },
};

static void __init at91_pm_init(void (*pm_idle)(void))
{
	struct device_node *pmc_np;
	const struct of_device_id *of_id;
	const struct pmc_info *pmc;

	if (at91_cpuidle_device.dev.platform_data)
		platform_device_register(&at91_cpuidle_device);

	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
	pm_data.pmc = of_iomap(pmc_np, 0);
	if (!pm_data.pmc) {
		pr_err("AT91: PM not supported, PMC not found\n");
		return;
	}

	pmc = of_id->data;
	pm_data.uhp_udp_mask = pmc->uhp_udp_mask;

	if (pm_idle)
		arm_pm_idle = pm_idle;

	at91_pm_sram_init();

	if (at91_suspend_sram_fn) {
		suspend_set_ops(&at91_pm_ops);
		pr_info("AT91: PM: standby: %s, suspend: %s\n",
			pm_modes[pm_data.standby_mode].pattern,
			pm_modes[pm_data.suspend_mode].pattern);
	} else {
		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
	}
}

void __init at91rm9200_pm_init(void)
{
	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
		return;

	at91_dt_ramc();

	/*
	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
	 */
	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);

	at91_pm_init(at91rm9200_idle);
}

void __init at91sam9_pm_init(void)
{
	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
		return;

	at91_dt_ramc();
	at91_pm_init(at91sam9_idle);
}

void __init sama5_pm_init(void)
{
	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
		return;

	at91_dt_ramc();
	at91_pm_init(NULL);
}

void __init sama5d2_pm_init(void)
{
	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
		return;

	at91_pm_backup_init();
	sama5_pm_init();
}

static int __init at91_pm_modes_select(char *str)
{
	char *s;
	substring_t args[MAX_OPT_ARGS];
	int standby, suspend;

	if (!str)
		return 0;

	s = strsep(&str, ",");
	standby = match_token(s, pm_modes, args);
	if (standby < 0)
		return 0;

	suspend = match_token(str, pm_modes, args);
	if (suspend < 0)
		return 0;

	pm_data.standby_mode = standby;
	pm_data.suspend_mode = suspend;

	return 0;
}
early_param("atmel.pm_modes", at91_pm_modes_select);
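
/*
 * The "atmel.pm_modes" boot parameter takes "<standby-mode>,<suspend-mode>"
 * using the names from pm_modes above. For example, "atmel.pm_modes=standby,ulp0"
 * matches the defaults (plain standby for PM_SUSPEND_STANDBY, ULP0/slow clock
 * for PM_SUSPEND_MEM), while "atmel.pm_modes=ulp0,backup" selects backup mode
 * for suspend-to-RAM on SoCs that support it (e.g. sama5d2).
 */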