// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/arm/mach-at91/pm.c
 * AT91 Power Management
 *
 * Copyright (C) 2005 David Brownell
 */

#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/parser.h>
#include <linux/suspend.h>

#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
#include <linux/platform_data/atmel.h>

#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
#include <asm/suspend.h>

#include "generic.h"
#include "pm.h"
#include "sam_secure.h"

#define BACKUP_DDR_PHY_CALIBRATION	(9)

/**
 * struct at91_pm_bu - AT91 power management backup unit data structure
 * @suspended: true if suspended to backup mode
 * @reserved: reserved
 * @canary: canary data for memory checking after exit from backup mode
 * @resume: resume API
 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
 * of the memory
 */
struct at91_pm_bu {
	int suspended;
	unsigned long reserved;
	phys_addr_t canary;
	phys_addr_t resume;
	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
};

/**
 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
 * @pswbu: power switch BU control registers
 */
struct at91_pm_sfrbu_regs {
	struct {
		u32 key;
		u32 ctrl;
		u32 state;
		u32 softsw;
	} pswbu;
};

/**
 * enum at91_pm_eth_clk - Ethernet clock indexes
 * @AT91_PM_ETH_PCLK: pclk index
 * @AT91_PM_ETH_HCLK: hclk index
 * @AT91_PM_ETH_MAX_CLK: max index
 */
enum at91_pm_eth_clk {
	AT91_PM_ETH_PCLK,
	AT91_PM_ETH_HCLK,
	AT91_PM_ETH_MAX_CLK,
};

/**
 * enum at91_pm_eth - Ethernet controller indexes
 * @AT91_PM_G_ETH: gigabit Ethernet controller index
 * @AT91_PM_E_ETH: megabit Ethernet controller index
 * @AT91_PM_MAX_ETH: max index
 */
enum at91_pm_eth {
	AT91_PM_G_ETH,
	AT91_PM_E_ETH,
	AT91_PM_MAX_ETH,
};

/**
 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
 * @dev: Ethernet device
 * @np: Ethernet device node
 * @clks: Ethernet clocks
 * @modes: power management modes that this quirk applies to
 * @dns_modes: do not suspend modes: do not suspend if Ethernet is configured
 *	       as wakeup source but buggy and no other wakeup source is
 *	       available
 */
struct at91_pm_quirk_eth {
	struct device *dev;
	struct device_node *np;
	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
	u32 modes;
	u32 dns_modes;
};

/**
 * struct at91_pm_quirks - AT91 PM quirks
 * @eth: Ethernet quirks
 */
struct at91_pm_quirks {
	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
};

/**
 * struct at91_soc_pm - AT91 SoC power management data structure
 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
 * @config_pmc_ws: wakeup sources configuration function for PMC
 * @ws_ids: wakeup sources of_device_id array
 * @bu: backup unit mapped data (for backup mode)
 * @quirks: PM quirks
 * @data: PM data to be used on last phase of suspend
 * @sfrbu_regs: SFRBU registers mapping
 * @memcs: memory chip select
 */
struct at91_soc_pm {
	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
	const struct of_device_id *ws_ids;
	struct at91_pm_bu *bu;
	struct at91_pm_quirks quirks;
	struct at91_pm_data data;
	struct at91_pm_sfrbu_regs sfrbu_regs;
	void *memcs;
};

/**
 * enum at91_pm_iomaps - IOs that need to be mapped for different PM modes
 * @AT91_PM_IOMAP_SHDWC: SHDWC controller
 * @AT91_PM_IOMAP_SFRBU: SFRBU controller
 * @AT91_PM_IOMAP_ETHC: Ethernet controller
 */
enum at91_pm_iomaps {
	AT91_PM_IOMAP_SHDWC,
	AT91_PM_IOMAP_SFRBU,
	AT91_PM_IOMAP_ETHC,
};

#define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)

static struct at91_soc_pm soc_pm = {
	.data = {
		.standby_mode = AT91_PM_STANDBY,
		.suspend_mode = AT91_PM_ULP0,
	},
};

static const match_table_t pm_modes __initconst = {
	{ AT91_PM_STANDBY, "standby" },
	{ AT91_PM_ULP0, "ulp0" },
	{ AT91_PM_ULP0_FAST, "ulp0-fast" },
	{ AT91_PM_ULP1, "ulp1" },
	{ AT91_PM_BACKUP, "backup" },
	{ -1, NULL },
};

#define at91_ramc_read(id, field) \
	__raw_readl(soc_pm.data.ramc[id] + field)

#define at91_ramc_write(id, field, value) \
	__raw_writel(value, soc_pm.data.ramc[id] + field)

static int at91_pm_valid_state(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;

	default:
		return 0;
	}
}

static int canary = 0xA5A5A5A5;

struct wakeup_source_info {
	unsigned int pmc_fsmr_bit;
	unsigned int shdwc_mr_bit;
	bool set_polarity;
};

static const struct wakeup_source_info ws_info[] = {
	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10), .set_polarity = true },
	{ .pmc_fsmr_bit = AT91_PMC_RTCAL, .shdwc_mr_bit = BIT(17) },
	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
};

static const struct of_device_id sama5d2_ws_ids[] = {
	{ .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] },
	{ .compatible = "atmel,sama5d2-rtc", .data = &ws_info[1] },
	{ .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] },
	{ .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
	{ .compatible = "usb-ohci", .data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
	{ .compatible = "usb-ehci", .data = &ws_info[2] },
	{ .compatible = "atmel,sama5d2-sdhci", .data = &ws_info[3] },
	{ /* sentinel */ }
};

static const struct of_device_id sam9x60_ws_ids[] = {
	{ .compatible = "microchip,sam9x60-rtc", .data = &ws_info[1] },
	{ .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
	{ .compatible = "usb-ohci", .data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
	{ .compatible = "usb-ehci", .data = &ws_info[2] },
	{ .compatible = "microchip,sam9x60-rtt", .data = &ws_info[4] },
	{ .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] },
	{ /* sentinel */ }
};

static const struct of_device_id sama7g5_ws_ids[] = {
	{ .compatible = "microchip,sama7g5-rtc", .data = &ws_info[1] },
	{ .compatible = "microchip,sama7g5-ohci", .data = &ws_info[2] },
	{ .compatible = "usb-ohci", .data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
	{ .compatible = "usb-ehci", .data = &ws_info[2] },
	{ .compatible = "microchip,sama7g5-sdhci", .data = &ws_info[3] },
	{ .compatible = "microchip,sama7g5-rtt", .data = &ws_info[4] },
	{ /* sentinel */ }
};

static int at91_pm_config_ws(unsigned int pm_mode, bool set)
{
	const struct wakeup_source_info *wsi;
	const struct of_device_id *match;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned int mode = 0, polarity = 0, val = 0;

	if (pm_mode != AT91_PM_ULP1)
		return 0;

	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
		return -EPERM;

	if (!set) {
		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
		return 0;
	}

	if (soc_pm.config_shdwc_ws)
		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);

	/* SHDWC.MR */
	val = readl(soc_pm.data.shdwc + 0x04);

	/* Loop through defined wakeup sources. */
	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
		pdev = of_find_device_by_node(np);
		if (!pdev)
			continue;

		if (device_may_wakeup(&pdev->dev)) {
			wsi = match->data;

			/* Check if enabled on SHDWC. */
			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
				goto put_device;

			mode |= wsi->pmc_fsmr_bit;
			if (wsi->set_polarity)
				polarity |= wsi->pmc_fsmr_bit;
		}

put_device:
		put_device(&pdev->dev);
	}

	if (mode) {
		if (soc_pm.config_pmc_ws)
			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
	} else {
		pr_err("AT91: PM: no ULP1 wakeup sources found!");
	}

	return mode ? 0 : -EPERM;
}

static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
					u32 *polarity)
{
	u32 val;

	/* SHDWC.WUIR */
	val = readl(shdwc + 0x0c);
	*mode |= (val & 0x3ff);
	*polarity |= ((val >> 16) & 0x3ff);

	return 0;
}

static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);
	writel(polarity, pmc + AT91_PMC_FSPR);

	return 0;
}

static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);

	return 0;
}

static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
{
	struct platform_device *pdev;

	/* Interface NA in DT. */
	if (!eth->np)
		return false;

	/* No quirks for this interface and current suspend mode. */
	if (!(eth->modes & BIT(soc_pm.data.mode)))
		return false;

	if (!eth->dev) {
		/* Driver not probed. */
		pdev = of_find_device_by_node(eth->np);
		if (!pdev)
			return false;
		/* put_device(eth->dev) is called at the end of suspend. */
		eth->dev = &pdev->dev;
	}

	/* No quirks if device isn't a wakeup source. */
	if (!device_may_wakeup(eth->dev))
		return false;

	return true;
}

static int at91_pm_config_quirks(bool suspend)
{
	struct at91_pm_quirk_eth *eth;
	int i, j, ret, tmp;

	/*
	 * Ethernet IPs whose device_node pointers are stored into
	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0,
	 * ULP1 or both due to a hardware bug. If they receive WoL packets
	 * while in ULP0 or ULP1, the IP could stop working or the whole
	 * system could stop working. We cannot handle this scenario in the
	 * Ethernet driver itself, as the driver is common to multiple
	 * vendors, and only here, in this file, do we know whether we
	 * suspend to ULP0 or ULP1 mode. Thus handle these scenarios here,
	 * as quirks.
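	 *
	 * Descriptive note (summary of the code below, not from the original
	 * comment): the quirk gates the interface's pclk/hclk across the
	 * suspend/resume cycle and refuses to suspend into a "do not
	 * suspend" mode when the affected interface is the only configured
	 * wakeup source.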
	 */
	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
		eth = &soc_pm.quirks.eth[i];

		if (!at91_pm_eth_quirk_is_valid(eth))
			continue;

		/*
		 * For modes in the dns_modes mask the system blocks if the
		 * quirk is not applied, but if it is applied the interface
		 * does not act on WoL events. Thus take care to avoid
		 * suspending if this interface is the only configured wakeup
		 * source.
		 */
		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
			int ws_count = 0;
#ifdef CONFIG_PM_SLEEP
			struct wakeup_source *ws;

			for_each_wakeup_source(ws) {
				if (ws->dev == eth->dev)
					continue;

				ws_count++;
				break;
			}
#endif

			/*
			 * Checking !ws_count is good for all platforms with
			 * issues, even when both G_ETH and E_ETH are
			 * available, as dns_modes is populated only on the
			 * G_ETH interface.
			 */
			if (!ws_count) {
				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
				ret = -EPERM;
				put_device(eth->dev);
				eth->dev = NULL;
				/* No need to revert clock settings for this eth. */
				i--;
				goto clk_unconfigure;
			}
		}

		if (suspend) {
			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
		} else {
			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
						      eth->clks);
			if (ret)
				goto clk_unconfigure;
			/*
			 * Release the reference to eth->dev taken in
			 * at91_pm_eth_quirk_is_valid().
			 */
			put_device(eth->dev);
			eth->dev = NULL;
		}
	}

	return 0;

clk_unconfigure:
	/*
	 * In case of resume, we reach this point if clk_bulk_prepare_enable()
	 * failed. We don't want to revert the previous
	 * clk_bulk_prepare_enable() for the other IP.
	 */
	for (j = i; j >= 0; j--) {
		eth = &soc_pm.quirks.eth[j];
		if (suspend) {
			if (!at91_pm_eth_quirk_is_valid(eth))
				continue;

			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
			if (tmp) {
				pr_err("AT91: PM: failed to enable %s clocks\n",
				       j == AT91_PM_G_ETH ? "geth" : "eth");
			}
		}

		/*
		 * Release the reference to eth->dev taken in
		 * at91_pm_eth_quirk_is_valid().
		 */
		put_device(eth->dev);
		eth->dev = NULL;
	}

	return ret;
}

/*
 * Called after processes are frozen, but before we shut down devices.
 */
static int at91_pm_begin(suspend_state_t state)
{
	int ret;

	switch (state) {
	case PM_SUSPEND_MEM:
		soc_pm.data.mode = soc_pm.data.suspend_mode;
		break;

	case PM_SUSPEND_STANDBY:
		soc_pm.data.mode = soc_pm.data.standby_mode;
		break;

	default:
		soc_pm.data.mode = -1;
	}

	ret = at91_pm_config_ws(soc_pm.data.mode, true);
	if (ret)
		return ret;

	if (soc_pm.data.mode == AT91_PM_BACKUP)
		soc_pm.bu->suspended = 1;
	else if (soc_pm.bu)
		soc_pm.bu->suspended = 0;

	return 0;
}

/*
 * Verify that all the clocks are correct before entering
 * slow-clock mode.
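 *
 * Concretely (see the checks below, added here as a summary): USB must not
 * still be clocked and any enabled programmable clock PCK0..PCK3 must be
 * sourced from the slow clock; otherwise the suspend attempt is aborted.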
 */
static int at91_pm_verify_clocks(void)
{
	unsigned long scsr;
	int i;

	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);

	/* USB must not be using PLLB */
	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
		return 0;
	}

	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
	for (i = 0; i < 4; i++) {
		u32 css;

		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
			continue;
		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
		if (css != AT91_PMC_CSS_SLOW) {
			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
			return 0;
		}
	}

	return 1;
}

/*
 * Call this from platform driver suspend() to see how deeply to suspend.
 * For example, some controllers (like OHCI) need one of the PLL clocks
 * in order to act as a wakeup source, and those are not available when
 * going into slow clock mode.
 *
 * REVISIT: generalize as clk_will_be_available(clk)? Other platforms have
 * the very same problem (but not using at91 main_clk), and it'd be better
 * to add one generic API rather than lots of platform-specific ones.
 */
int at91_suspend_entering_slow_clock(void)
{
	return (soc_pm.data.mode >= AT91_PM_ULP0);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);

static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
extern u32 at91_pm_suspend_in_sram_sz;

static int at91_suspend_finish(unsigned long val)
{
	unsigned char modified_gray_code[] = {
		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
		0x10, 0x11,
	};
	unsigned int tmp, index;
	int i;

	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
		/*
		 * The bootloader will perform DDR recalibration and will try
		 * to restore ZQ0SR0 with the value saved here. But the
		 * calibration is buggy and restoring some values from ZQ0SR0
		 * is forbidden and risky, thus we need to provide processed
		 * values for these (modified gray code values).
		 */
		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);

		/* Store pull-down output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];

		/* Store pull-up output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/* Store pull-down on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/* Store pull-up on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];

		/*
		 * The first 8 words of memory might get corrupted in the
		 * process of DDR PHY recalibration; they are saved here in
		 * securam and will be restored later, after recalibration,
		 * by the bootloader.
		 */
		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
			soc_pm.bu->ddr_phy_calibration[i] =
				*((unsigned int *)soc_pm.memcs + (i - 1));
	}

	flush_cache_all();
	outer_disable();

	at91_suspend_sram_fn(&soc_pm.data);

	return 0;
}

/**
 * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
 * to automatic/hardware mode.
 *
 * The Backup Unit Power Switch can be managed either by software or hardware.
 * Enabling hardware mode allows the automatic transition of power between
 * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
 * availability of these power sources.
 *
 * If the Backup Unit Power Switch is already in automatic mode, no action is
 * required. If it is in software-controlled mode, it is switched to automatic
 * mode to enhance safety and eliminate the need for toggling between power
 * sources.
 */
static void at91_pm_switch_ba_to_auto(void)
{
	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
	unsigned int val;

	/* Just for safety. */
	if (!soc_pm.data.sfrbu)
		return;

	val = readl(soc_pm.data.sfrbu + offset);

	/* Already on auto/hardware. */
	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
		return;

	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
	val |= soc_pm.sfrbu_regs.pswbu.key;
	writel(val, soc_pm.data.sfrbu + offset);
}

static void at91_pm_suspend(suspend_state_t state)
{
	if (soc_pm.data.mode == AT91_PM_BACKUP) {
		at91_pm_switch_ba_to_auto();

		cpu_suspend(0, at91_suspend_finish);

		/* The SRAM is lost between suspend cycles */
		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
					     &at91_pm_suspend_in_sram,
					     at91_pm_suspend_in_sram_sz);
	} else {
		at91_suspend_finish(0);
	}

	outer_resume();
}

/*
 * STANDBY mode has *all* drivers suspended; it ignores irqs not marked as
 * 'wakeup' event sources and reduces DRAM power. But otherwise it's identical
 * to PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 *
 * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
 * suspend more deeply: the master clock switches to clk32k and the main
 * oscillator is turned off.
 *
 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in
 * self-refresh.
 */
static int at91_pm_enter(suspend_state_t state)
{
	int ret;

	ret = at91_pm_config_quirks(true);
	if (ret)
		return ret;

	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		/*
		 * Ensure that clocks are in a valid state.
		 */
		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
		    !at91_pm_verify_clocks())
			goto error;

		at91_pm_suspend(state);

		break;

	case PM_SUSPEND_ON:
		cpu_do_idle();
		break;

	default:
		pr_debug("AT91: PM - bogus suspend state %d\n", state);
		goto error;
	}

error:
	at91_pm_config_quirks(false);
	return 0;
}

/*
 * Called right prior to thawing processes.
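 * The wakeup sources configured in at91_pm_begin() are cleared here.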
 */
static void at91_pm_end(void)
{
	at91_pm_config_ws(soc_pm.data.mode, false);
}

static const struct platform_suspend_ops at91_pm_ops = {
	.valid = at91_pm_valid_state,
	.begin = at91_pm_begin,
	.enter = at91_pm_enter,
	.end = at91_pm_end,
};

static struct platform_device at91_cpuidle_device = {
	.name = "cpuidle-at91",
};

/*
 * The AT91RM9200 goes into self-refresh mode with this command, and will
 * terminate self-refresh automatically on the next SDRAM access.
 *
 * Self-refresh mode is exited as soon as a memory access is made, but we don't
 * know for sure when that happens. However, we need to restore the low-power
 * mode if it was enabled before going idle. Restoring low-power mode while
 * still in self-refresh is "not recommended", but seems to work.
 */
static void at91rm9200_standby(void)
{
	asm volatile(
		"b 1f\n\t"
		".align 5\n\t"
		"1: mcr p15, 0, %0, c7, c10, 4\n\t"
		"   str %2, [%1, %3]\n\t"
		"   mcr p15, 0, %0, c7, c0, 4\n\t"
		:
		: "r" (0), "r" (soc_pm.data.ramc[0]),
		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}

/*
 * We manage both DDRAM/SDRAM controllers, so we need more than one value to
 * remember.
 */
static void at91_ddr_standby(void)
{
	/* Those two values allow us to delay self-refresh activation
	 * to the maximum. */
	u32 lpr0, lpr1 = 0;
	u32 mdr, saved_mdr0, saved_mdr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	/* LPDDR1 --> force DDR2 mode during self-refresh */
	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
		mdr |= AT91_DDRSDRC_MD_DDR2;
		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
	}

	if (soc_pm.data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
			mdr |= AT91_DDRSDRC_MD_DDR2;
			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
		}
	}

	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);

	cpu_do_idle();

	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
	if (soc_pm.data.ramc[1]) {
		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
	}
}

static void sama5d3_ddr_standby(void)
{
	u32 lpr0;
	u32 saved_lpr0;

	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;

	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);

	cpu_do_idle();

	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
}

/*
 * We manage both DDRAM/SDRAM controllers, so we need more than one value to
 * remember.
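 * The second controller (ramc[1]) is optional, so every access to it is
 * guarded by a soc_pm.data.ramc[1] check.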
 */
static void at91sam9_sdram_standby(void)
{
	u32 lpr0, lpr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	if (soc_pm.data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
	}

	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);

	cpu_do_idle();

	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}

static void sama7g5_standby(void)
{
	int pwrtmg, ratio;

	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);

	/*
	 * Place the RAM into self-refresh after a maximum number of idle
	 * clock cycles. The maximum is configured by the bootloader in
	 * UDDRC_PWRMGT.SELFREF_TO_X32.
	 */
	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	/* Divide CPU clock by 16. */
	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);

	cpu_do_idle();

	/* Restore previous configuration. */
	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
}

struct ramc_info {
	void (*idle)(void);
	unsigned int memctrl;
};

static const struct ramc_info ramc_infos[] __initconst = {
	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC },
	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC },
	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR },
	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR },
	{ .idle = sama7g5_standby, },
};

static const struct of_device_id ramc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
	{ /* sentinel */ }
};

static const struct of_device_id ramc_phy_ids[] __initconst = {
	{ .compatible = "microchip,sama7g5-ddr3phy", },
	{ /* Sentinel. */ },
};

static __init int at91_dt_ramc(bool phy_mandatory)
{
	struct device_node *np;
	const struct of_device_id *of_id;
	int idx = 0;
	void *standby = NULL;
	const struct ramc_info *ramc;
	int ret;

	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
		soc_pm.data.ramc[idx] = of_iomap(np, 0);
		if (!soc_pm.data.ramc[idx]) {
			pr_err("unable to map ramc[%d] cpu registers\n", idx);
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}

		ramc = of_id->data;
		if (ramc) {
			if (!standby)
				standby = ramc->idle;
			soc_pm.data.memctrl = ramc->memctrl;
		}

		idx++;
	}

	if (!idx) {
		pr_err("unable to find compatible ram controller node in dtb\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	/* Look up the DDR PHY node, if any. */
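	/*
	 * Note (summary, not from the original comment): only the
	 * "microchip,sama7g5-ddr3phy" compatible is listed in ramc_phy_ids;
	 * the PHY is required only when at91_dt_ramc() is called with
	 * phy_mandatory set, as sama7_pm_init() does.
	 */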
	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
		soc_pm.data.ramc_phy = of_iomap(np, 0);
		if (!soc_pm.data.ramc_phy) {
			pr_err("unable to map ramc phy cpu registers\n");
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}
	}

	if (phy_mandatory && !soc_pm.data.ramc_phy) {
		pr_err("DDR PHY is mandatory!\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	if (!standby) {
		pr_warn("ramc no standby function available\n");
		return 0;
	}

	at91_cpuidle_device.dev.platform_data = standby;

	return 0;

unmap_ramc:
	while (idx)
		iounmap(soc_pm.data.ramc[--idx]);

	return ret;
}

static void at91rm9200_idle(void)
{
	/*
	 * Disable the processor clock. The processor will be automatically
	 * re-enabled by an interrupt or by a reset.
	 */
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
}

static void at91sam9_idle(void)
{
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
	cpu_do_idle();
}

static void __init at91_pm_sram_init(void)
{
	struct gen_pool *sram_pool;
	phys_addr_t sram_pbase;
	unsigned long sram_base;
	struct device_node *node;
	struct platform_device *pdev = NULL;

	for_each_compatible_node(node, NULL, "mmio-sram") {
		pdev = of_find_device_by_node(node);
		if (pdev) {
			of_node_put(node);
			break;
		}
	}

	if (!pdev) {
		pr_warn("%s: failed to find sram device!\n", __func__);
		return;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: sram pool unavailable!\n", __func__);
		goto out_put_device;
	}

	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
	if (!sram_base) {
		pr_warn("%s: unable to alloc sram!\n", __func__);
		goto out_put_device;
	}

	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
					at91_pm_suspend_in_sram_sz, false);
	if (!at91_suspend_sram_fn) {
		pr_warn("SRAM: Could not map\n");
		goto out_put_device;
	}

	/* Copy the pm suspend handler to SRAM */
	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
	return;

out_put_device:
	put_device(&pdev->dev);
	return;
}

static bool __init at91_is_pm_mode_active(int pm_mode)
{
	return (soc_pm.data.standby_mode == pm_mode ||
		soc_pm.data.suspend_mode == pm_mode);
}

static int __init at91_pm_backup_scan_memcs(unsigned long node,
					    const char *uname, int depth,
					    void *data)
{
	const char *type;
	const __be32 *reg;
	int *located = data;
	int size;

	/* Memory node already located. */
	if (*located)
		return 0;

	type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "memory" nodes only. */
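	/*
	 * Descriptive note (summary of the code below): only the base address
	 * of the first "memory" node is recorded; it is converted to a
	 * virtual address and used as soc_pm.memcs when the first words of
	 * RAM are saved for backup mode.
	 */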
	if (!type || strcmp(type, "memory"))
		return 0;

	reg = of_get_flat_dt_prop(node, "reg", &size);
	if (reg) {
		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
		*located = 1;
	}

	return 0;
}

static int __init at91_pm_backup_init(void)
{
	struct gen_pool *sram_pool;
	struct device_node *np;
	struct platform_device *pdev;
	int ret = -ENODEV, located = 0;

	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
		return -EPERM;

	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
		return 0;

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
	if (!np)
		return ret;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		pr_warn("%s: failed to find securam device!\n", __func__);
		return ret;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: securam pool unavailable!\n", __func__);
		goto securam_fail;
	}

	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
	if (!soc_pm.bu) {
		pr_warn("%s: unable to alloc securam!\n", __func__);
		ret = -ENOMEM;
		goto securam_fail;
	}

	soc_pm.bu->suspended = 0;
	soc_pm.bu->canary = __pa_symbol(&canary);
	soc_pm.bu->resume = __pa_symbol(cpu_resume);
	if (soc_pm.data.ramc_phy) {
		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
		if (!located)
			goto securam_fail;
	}

	return 0;

securam_fail:
	put_device(&pdev->dev);
	return ret;
}

static void __init at91_pm_secure_init(void)
{
	int suspend_mode;
	struct arm_smccc_res res;

	suspend_mode = soc_pm.data.suspend_mode;

	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
			     suspend_mode, 0);
	if (res.a0 == 0) {
		pr_info("AT91: Secure PM: suspend mode set to %s\n",
			pm_modes[suspend_mode].pattern);
		return;
	}

	pr_warn("AT91: Secure PM: %s mode not supported !\n",
		pm_modes[suspend_mode].pattern);

	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
	if (res.a0 == 0) {
		pr_warn("AT91: Secure PM: failed to get default mode\n");
		return;
	}

	pr_info("AT91: Secure PM: using default suspend mode %s\n",
		pm_modes[suspend_mode].pattern);

	soc_pm.data.suspend_mode = res.a1;
}

static const struct of_device_id atmel_shdwc_ids[] = {
	{ .compatible = "atmel,sama5d2-shdwc" },
	{ .compatible = "microchip,sam9x60-shdwc" },
	{ .compatible = "microchip,sama7g5-shdwc" },
	{ /* sentinel. */ }
};

static const struct of_device_id gmac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d29-gem" },
	{ .compatible = "microchip,sama7g5-gem" },
	{ },
};

static const struct of_device_id emac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-macb" },
	{ .compatible = "microchip,sama7g5-emac" },
	{ },
};

/*
 * Replaces _mode_to_replace with a supported mode that doesn't depend
 * on the controller represented by _map_bitmask
 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
 * PM mode
 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
 * the controller represented by _map_bitmask, _mode_to_replace needs to be
 * updated
 * @_mode_to_replace: standby_mode or suspend_mode that needs to be
 * updated
 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
 * to avoid having standby_mode and suspend_mode set to the same AT91
 * PM mode
 */
#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
			     _mode_to_check)				\
	do {								\
		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
			int _mode_to_use, _mode_complementary;		\
			/* Use ULP0 if it doesn't need _map_bitmask. */	\
			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
				_mode_to_use = AT91_PM_ULP0;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			} else {					\
				_mode_to_use = AT91_PM_STANDBY;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			}						\
									\
			if ((_mode_to_check) != _mode_to_use)		\
				(_mode_to_replace) = _mode_to_use;	\
			else						\
				(_mode_to_replace) = _mode_complementary;\
		}							\
	} while (0)

/*
 * Replaces standby and suspend modes with default supported modes:
 * ULP0 and STANDBY.
 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
 * flags
 * @_map: controller specific name; standby and suspend modes need to be
 * replaced in order to not depend on this controller
 */
#define AT91_PM_REPLACE_MODES(_maps, _map)				\
	do {								\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.standby_mode),	\
				     (soc_pm.data.suspend_mode));	\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.suspend_mode),	\
				     (soc_pm.data.standby_mode));	\
	} while (0)

static int __init at91_pm_get_eth_clks(struct device_node *np,
				       struct clk_bulk_data *clks)
{
	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);

	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);

	return 0;
}

static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
{
	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
}

static void __init at91_pm_modes_init(const u32 *maps, int len)
{
	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
	struct device_node *np;
	int ret;

	ret = at91_pm_backup_init();
	if (ret) {
		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
			soc_pm.data.standby_mode = AT91_PM_ULP0;
		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
			soc_pm.data.suspend_mode = AT91_PM_ULP0;
	}

	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
		np = of_find_matching_node(NULL, atmel_shdwc_ids);
		if (!np) {
			pr_warn("%s: failed to find shdwc!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SHDWC);
		} else {
			soc_pm.data.shdwc = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
		if (!np) {
			pr_warn("%s: failed to find sfrbu!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SFRBU);
		} else {
			soc_pm.data.sfrbu = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
		np = of_find_matching_node(NULL, gmac_ids);
		if (!np) {
			np = of_find_matching_node(NULL, emac_ids);
			if (np)
				goto get_emac_clks;
			AT91_PM_REPLACE_MODES(maps, ETHC);
			goto unmap_unused_nodes;
		} else {
			gmac->np = np;
			at91_pm_get_eth_clks(np, gmac->clks);
		}

		np = of_find_matching_node(NULL, emac_ids);
		if (!np) {
			if (at91_pm_eth_clks_empty(gmac->clks))
				AT91_PM_REPLACE_MODES(maps, ETHC);
		} else {
get_emac_clks:
			emac->np = np;
			ret = at91_pm_get_eth_clks(np, emac->clks);
			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
				of_node_put(gmac->np);
				of_node_put(emac->np);
				gmac->np = NULL;
				emac->np = NULL;
			}
		}
	}

unmap_unused_nodes:
	/* Unmap all unnecessary. */
	if (soc_pm.data.shdwc &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
		iounmap(soc_pm.data.shdwc);
		soc_pm.data.shdwc = NULL;
	}

	if (soc_pm.data.sfrbu &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
		iounmap(soc_pm.data.sfrbu);
		soc_pm.data.sfrbu = NULL;
	}

	return;
}

struct pmc_info {
	unsigned long uhp_udp_mask;
	unsigned long mckr;
	unsigned long version;
};

static const struct pmc_info pmc_infos[] __initconst = {
	{
		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = 0,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
		.mckr = 0x28,
		.version = AT91_PMC_V2,
	},
	{
		.mckr = 0x28,
		.version = AT91_PMC_V2,
	},
};

static const struct of_device_id atmel_pmc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
	{ /* sentinel */ },
};

static void __init at91_pm_modes_validate(const int *modes, int len)
{
	u8 i, standby = 0, suspend = 0;
	int mode;

	for (i = 0; i < len; i++) {
		if (standby && suspend)
			break;

		if (modes[i] == soc_pm.data.standby_mode && !standby) {
			standby = 1;
			continue;
		}

		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
			suspend = 1;
			continue;
		}
	}

	if (!standby) {
		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
			mode = AT91_PM_ULP0;
		else
			mode = AT91_PM_STANDBY;

		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
			pm_modes[soc_pm.data.standby_mode].pattern,
			pm_modes[mode].pattern);
		soc_pm.data.standby_mode = mode;
	}

	if (!suspend) {
		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
			mode = AT91_PM_STANDBY;
		else
			mode = AT91_PM_ULP0;

		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
			pm_modes[soc_pm.data.suspend_mode].pattern,
			pm_modes[mode].pattern);
		soc_pm.data.suspend_mode = mode;
	}
}

static void __init at91_pm_init(void (*pm_idle)(void))
{
	struct device_node *pmc_np;
	const struct of_device_id *of_id;
	const struct pmc_info *pmc;

	if (at91_cpuidle_device.dev.platform_data)
		platform_device_register(&at91_cpuidle_device);

	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
	soc_pm.data.pmc = of_iomap(pmc_np, 0);
	of_node_put(pmc_np);
	if (!soc_pm.data.pmc) {
		pr_err("AT91: PM not supported, PMC not found\n");
		return;
	}

	pmc = of_id->data;
	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
	soc_pm.data.pmc_mckr_offset = pmc->mckr;
	soc_pm.data.pmc_version = pmc->version;

	if (pm_idle)
		arm_pm_idle = pm_idle;

	at91_pm_sram_init();

	if (at91_suspend_sram_fn) {
		suspend_set_ops(&at91_pm_ops);
		pr_info("AT91: PM: standby: %s, suspend: %s\n",
			pm_modes[soc_pm.data.standby_mode].pattern,
			pm_modes[soc_pm.data.suspend_mode].pattern);
	} else {
		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
	}
}

void __init at91rm9200_pm_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
		return;

	/*
	 * Force STANDBY and ULP0 modes to avoid calling
	 * at91_pm_modes_validate(), which may increase booting time.
	 * The platform supports only the STANDBY and ULP0 modes anyway.
	 */
	soc_pm.data.standby_mode = AT91_PM_STANDBY;
	soc_pm.data.suspend_mode = AT91_PM_ULP0;

	ret = at91_dt_ramc(false);
	if (ret)
		return;

	/*
	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
	 */
	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);

	at91_pm_init(at91rm9200_idle);
}

void __init sam9x60_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
		return;

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	soc_pm.ws_ids = sam9x60_ws_ids;
	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
}

void __init at91sam9_pm_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
		return;

	/*
	 * Force STANDBY and ULP0 modes to avoid calling
	 * at91_pm_modes_validate(), which may increase booting time.
	 * The platform supports only the STANDBY and ULP0 modes anyway.
	 */
	soc_pm.data.standby_mode = AT91_PM_STANDBY;
	soc_pm.data.suspend_mode = AT91_PM_ULP0;

	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(at91sam9_idle);
}

void __init sama5_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
		return;

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	/* The quirk applies to ULP0, ULP0 fast and ULP1 modes. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
						 BIT(AT91_PM_ULP0_FAST) |
						 BIT(AT91_PM_ULP1);
	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
						     BIT(AT91_PM_ULP0_FAST);
}

void __init sama5d2_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
		AT91_PM_BACKUP,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC) |
				 AT91_PM_IOMAP(ETHC),
		[AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) |
				   AT91_PM_IOMAP(SFRBU),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
		return;

	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
		pr_warn("AT91: Secure PM: ignoring standby mode\n");
		at91_pm_secure_init();
		return;
	}

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	soc_pm.ws_ids = sama5d2_ws_ids;
	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;

	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
	soc_pm.sfrbu_regs.pswbu.state = BIT(3);

	/* The quirk applies to ULP0, ULP0 fast and ULP1 modes. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
						 BIT(AT91_PM_ULP0_FAST) |
						 BIT(AT91_PM_ULP1);
	/*
	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
	 * source.
1610 */ 1611 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) | 1612 BIT(AT91_PM_ULP0_FAST); 1613 } 1614 1615 void __init sama7_pm_init(void) 1616 { 1617 static const int modes[] __initconst = { 1618 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP, 1619 }; 1620 static const u32 iomaps[] __initconst = { 1621 [AT91_PM_ULP0] = AT91_PM_IOMAP(SFRBU), 1622 [AT91_PM_ULP1] = AT91_PM_IOMAP(SFRBU) | 1623 AT91_PM_IOMAP(SHDWC) | 1624 AT91_PM_IOMAP(ETHC), 1625 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) | 1626 AT91_PM_IOMAP(SHDWC), 1627 }; 1628 int ret; 1629 1630 if (!IS_ENABLED(CONFIG_SOC_SAMA7)) 1631 return; 1632 1633 at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); 1634 1635 ret = at91_dt_ramc(true); 1636 if (ret) 1637 return; 1638 1639 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps)); 1640 at91_pm_init(NULL); 1641 1642 soc_pm.ws_ids = sama7g5_ws_ids; 1643 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws; 1644 1645 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8); 1646 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0); 1647 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1); 1648 soc_pm.sfrbu_regs.pswbu.state = BIT(2); 1649 1650 /* Quirks applies to ULP1 for both Ethernet interfaces. */ 1651 soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1); 1652 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1); 1653 } 1654 1655 static int __init at91_pm_modes_select(char *str) 1656 { 1657 char *s; 1658 substring_t args[MAX_OPT_ARGS]; 1659 int standby, suspend; 1660 1661 if (!str) 1662 return 0; 1663 1664 s = strsep(&str, ","); 1665 standby = match_token(s, pm_modes, args); 1666 if (standby < 0) 1667 return 0; 1668 1669 suspend = match_token(str, pm_modes, args); 1670 if (suspend < 0) 1671 return 0; 1672 1673 soc_pm.data.standby_mode = standby; 1674 soc_pm.data.suspend_mode = suspend; 1675 1676 return 0; 1677 } 1678 early_param("atmel.pm_modes", at91_pm_modes_select); 1679