1 /* 2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Yehuda Yitschak <yehuday@marvell.com> 7 * Gregory Clement <gregory.clement@free-electrons.com> 8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 9 * 10 * This file is licensed under the terms of the GNU General Public 11 * License version 2. This program is licensed "as is" without any 12 * warranty of any kind, whether express or implied. 13 * 14 * The Armada 370 and Armada XP SOCs have a power management service 15 * unit which is responsible for powering down and waking up CPUs and 16 * other SOC units 17 */ 18 19 #define pr_fmt(fmt) "mvebu-pmsu: " fmt 20 21 #include <linux/clk.h> 22 #include <linux/cpu_pm.h> 23 #include <linux/cpufreq-dt.h> 24 #include <linux/delay.h> 25 #include <linux/init.h> 26 #include <linux/io.h> 27 #include <linux/kernel.h> 28 #include <linux/mbus.h> 29 #include <linux/of_address.h> 30 #include <linux/of_device.h> 31 #include <linux/platform_device.h> 32 #include <linux/pm_opp.h> 33 #include <linux/resource.h> 34 #include <linux/slab.h> 35 #include <linux/smp.h> 36 #include <asm/cacheflush.h> 37 #include <asm/cp15.h> 38 #include <asm/smp_scu.h> 39 #include <asm/smp_plat.h> 40 #include <asm/suspend.h> 41 #include <asm/tlbflush.h> 42 #include "common.h" 43 44 45 #define PMSU_BASE_OFFSET 0x100 46 #define PMSU_REG_SIZE 0x1000 47 48 /* PMSU MP registers */ 49 #define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104) 50 #define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18) 51 #define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16) 52 #define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20) 53 54 #define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108) 55 56 #define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0) 57 58 #define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c) 59 #define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16) 60 #define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17) 61 #define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20) 62 #define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21) 63 #define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22) 64 #define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24) 65 #define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25) 66 67 #define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120) 68 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1) 69 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17) 70 71 #define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124) 72 73 /* PMSU fabric registers */ 74 #define L2C_NFABRIC_PM_CTL 0x4 75 #define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20) 76 77 /* PMSU delay registers */ 78 #define PMSU_POWERDOWN_DELAY 0xF04 79 #define PMSU_POWERDOWN_DELAY_PMU BIT(1) 80 #define PMSU_POWERDOWN_DELAY_MASK 0xFFFE 81 #define PMSU_DFLT_ARMADA38X_DELAY 0x64 82 83 /* CA9 MPcore SoC Control registers */ 84 85 #define MPCORE_RESET_CTL 0x64 86 #define MPCORE_RESET_CTL_L2 BIT(0) 87 #define MPCORE_RESET_CTL_DEBUG BIT(16) 88 89 #define SRAM_PHYS_BASE 0xFFFF0000 90 #define BOOTROM_BASE 0xFFF00000 91 #define BOOTROM_SIZE 0x100000 92 93 #define ARMADA_370_CRYPT0_ENG_TARGET 0x9 94 #define ARMADA_370_CRYPT0_ENG_ATTR 0x1 95 96 extern void ll_disable_coherency(void); 97 extern void ll_enable_coherency(void); 98 99 extern void armada_370_xp_cpu_resume(void); 100 extern void armada_38x_cpu_resume(void); 101 102 static phys_addr_t pmsu_mp_phys_base; 103 static void __iomem *pmsu_mp_base; 104 105 static void *mvebu_cpu_resume; 106 107 static const struct of_device_id of_pmsu_table[] = { 108 { .compatible = "marvell,armada-370-pmsu", }, 109 { .compatible = "marvell,armada-370-xp-pmsu", }, 110 { .compatible = "marvell,armada-380-pmsu", }, 111 { /* end of list */ }, 112 }; 113 114 void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) 115 { 116 writel(virt_to_phys(boot_addr), pmsu_mp_base + 117 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); 118 } 119 120 extern unsigned char mvebu_boot_wa_start; 121 extern unsigned char mvebu_boot_wa_end; 122 123 /* 124 * This function sets up the boot address workaround needed for SMP 125 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the 126 * BootROM Mbus window, and instead remaps a crypto SRAM into which a 127 * custom piece of code is copied to replace the problematic BootROM. 128 */ 129 int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target, 130 unsigned int crypto_eng_attribute, 131 phys_addr_t resume_addr_reg) 132 { 133 void __iomem *sram_virt_base; 134 u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start; 135 136 mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE); 137 mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute, 138 SRAM_PHYS_BASE, SZ_64K); 139 140 sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K); 141 if (!sram_virt_base) { 142 pr_err("Unable to map SRAM to setup the boot address WA\n"); 143 return -ENOMEM; 144 } 145 146 memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len); 147 148 /* 149 * The last word of the code copied in SRAM must contain the 150 * physical base address of the PMSU register. We 151 * intentionally store this address in the native endianness 152 * of the system. 153 */ 154 __raw_writel((unsigned long)resume_addr_reg, 155 sram_virt_base + code_len - 4); 156 157 iounmap(sram_virt_base); 158 159 return 0; 160 } 161 162 static int __init mvebu_v7_pmsu_init(void) 163 { 164 struct device_node *np; 165 struct resource res; 166 int ret = 0; 167 168 np = of_find_matching_node(NULL, of_pmsu_table); 169 if (!np) 170 return 0; 171 172 pr_info("Initializing Power Management Service Unit\n"); 173 174 if (of_address_to_resource(np, 0, &res)) { 175 pr_err("unable to get resource\n"); 176 ret = -ENOENT; 177 goto out; 178 } 179 180 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) { 181 pr_warn(FW_WARN "deprecated pmsu binding\n"); 182 res.start = res.start - PMSU_BASE_OFFSET; 183 res.end = res.start + PMSU_REG_SIZE - 1; 184 } 185 186 if (!request_mem_region(res.start, resource_size(&res), 187 np->full_name)) { 188 pr_err("unable to request region\n"); 189 ret = -EBUSY; 190 goto out; 191 } 192 193 pmsu_mp_phys_base = res.start; 194 195 pmsu_mp_base = ioremap(res.start, resource_size(&res)); 196 if (!pmsu_mp_base) { 197 pr_err("unable to map registers\n"); 198 release_mem_region(res.start, resource_size(&res)); 199 ret = -ENOMEM; 200 goto out; 201 } 202 203 out: 204 of_node_put(np); 205 return ret; 206 } 207 208 static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void) 209 { 210 u32 reg; 211 212 if (pmsu_mp_base == NULL) 213 return; 214 215 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */ 216 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL); 217 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN; 218 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL); 219 } 220 221 enum pmsu_idle_prepare_flags { 222 PMSU_PREPARE_NORMAL = 0, 223 PMSU_PREPARE_DEEP_IDLE = BIT(0), 224 PMSU_PREPARE_SNOOP_DISABLE = BIT(1), 225 }; 226 227 /* No locking is needed because we only access per-CPU registers */ 228 static int mvebu_v7_pmsu_idle_prepare(unsigned long flags) 229 { 230 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 231 u32 reg; 232 233 if (pmsu_mp_base == NULL) 234 return -EINVAL; 235 236 /* 237 * Adjust the PMSU configuration to wait for WFI signal, enable 238 * IRQ and FIQ as wakeup events, set wait for snoop queue empty 239 * indication and mask IRQ and FIQ from CPU 240 */ 241 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 242 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 243 PMSU_STATUS_AND_MASK_IRQ_WAKEUP | 244 PMSU_STATUS_AND_MASK_FIQ_WAKEUP | 245 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT | 246 PMSU_STATUS_AND_MASK_IRQ_MASK | 247 PMSU_STATUS_AND_MASK_FIQ_MASK; 248 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 249 250 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 251 /* ask HW to power down the L2 Cache if needed */ 252 if (flags & PMSU_PREPARE_DEEP_IDLE) 253 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 254 255 /* request power down */ 256 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ; 257 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 258 259 if (flags & PMSU_PREPARE_SNOOP_DISABLE) { 260 /* Disable snoop disable by HW - SW is taking care of it */ 261 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 262 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP; 263 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 264 } 265 266 return 0; 267 } 268 269 int armada_370_xp_pmsu_idle_enter(unsigned long deepidle) 270 { 271 unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE; 272 int ret; 273 274 if (deepidle) 275 flags |= PMSU_PREPARE_DEEP_IDLE; 276 277 ret = mvebu_v7_pmsu_idle_prepare(flags); 278 if (ret) 279 return ret; 280 281 v7_exit_coherency_flush(all); 282 283 ll_disable_coherency(); 284 285 dsb(); 286 287 wfi(); 288 289 /* If we are here, wfi failed. As processors run out of 290 * coherency for some time, tlbs might be stale, so flush them 291 */ 292 local_flush_tlb_all(); 293 294 ll_enable_coherency(); 295 296 /* Test the CR_C bit and set it if it was cleared */ 297 asm volatile( 298 "mrc p15, 0, r0, c1, c0, 0 \n\t" 299 "tst r0, %0 \n\t" 300 "orreq r0, r0, #(1 << 2) \n\t" 301 "mcreq p15, 0, r0, c1, c0, 0 \n\t" 302 "isb " 303 : : "Ir" (CR_C) : "r0"); 304 305 pr_debug("Failed to suspend the system\n"); 306 307 return 0; 308 } 309 310 static int armada_370_xp_cpu_suspend(unsigned long deepidle) 311 { 312 return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter); 313 } 314 315 int armada_38x_do_cpu_suspend(unsigned long deepidle) 316 { 317 unsigned long flags = 0; 318 319 if (deepidle) 320 flags |= PMSU_PREPARE_DEEP_IDLE; 321 322 mvebu_v7_pmsu_idle_prepare(flags); 323 /* 324 * Already flushed cache, but do it again as the outer cache 325 * functions dirty the cache with spinlocks 326 */ 327 v7_exit_coherency_flush(louis); 328 329 scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF); 330 331 cpu_do_idle(); 332 333 return 1; 334 } 335 336 static int armada_38x_cpu_suspend(unsigned long deepidle) 337 { 338 return cpu_suspend(false, armada_38x_do_cpu_suspend); 339 } 340 341 /* No locking is needed because we only access per-CPU registers */ 342 void mvebu_v7_pmsu_idle_exit(void) 343 { 344 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 345 u32 reg; 346 347 if (pmsu_mp_base == NULL) 348 return; 349 /* cancel ask HW to power down the L2 Cache if possible */ 350 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 351 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 352 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 353 354 /* cancel Enable wakeup events and mask interrupts */ 355 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 356 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP); 357 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 358 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT; 359 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK); 360 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 361 } 362 363 static int mvebu_v7_cpu_pm_notify(struct notifier_block *self, 364 unsigned long action, void *hcpu) 365 { 366 if (action == CPU_PM_ENTER) { 367 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 368 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume); 369 } else if (action == CPU_PM_EXIT) { 370 mvebu_v7_pmsu_idle_exit(); 371 } 372 373 return NOTIFY_OK; 374 } 375 376 static struct notifier_block mvebu_v7_cpu_pm_notifier = { 377 .notifier_call = mvebu_v7_cpu_pm_notify, 378 }; 379 380 static struct platform_device mvebu_v7_cpuidle_device; 381 382 static int broken_idle(struct device_node *np) 383 { 384 if (of_property_read_bool(np, "broken-idle")) { 385 pr_warn("CPU idle is currently broken: disabling\n"); 386 return 1; 387 } 388 389 return 0; 390 } 391 392 static __init int armada_370_cpuidle_init(void) 393 { 394 struct device_node *np; 395 phys_addr_t redirect_reg; 396 397 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 398 if (!np) 399 return -ENODEV; 400 401 if (broken_idle(np)) 402 goto end; 403 404 /* 405 * On Armada 370, there is "a slow exit process from the deep 406 * idle state due to heavy L1/L2 cache cleanup operations 407 * performed by the BootROM software". To avoid this, we 408 * replace the restart code of the bootrom by a a simple jump 409 * to the boot address. Then the code located at this boot 410 * address will take care of the initialization. 411 */ 412 redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0); 413 mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET, 414 ARMADA_370_CRYPT0_ENG_ATTR, 415 redirect_reg); 416 417 mvebu_cpu_resume = armada_370_xp_cpu_resume; 418 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 419 mvebu_v7_cpuidle_device.name = "cpuidle-armada-370"; 420 421 end: 422 of_node_put(np); 423 return 0; 424 } 425 426 static __init int armada_38x_cpuidle_init(void) 427 { 428 struct device_node *np; 429 void __iomem *mpsoc_base; 430 u32 reg; 431 432 pr_warn("CPU idle is currently broken on Armada 38x: disabling\n"); 433 return 0; 434 435 np = of_find_compatible_node(NULL, NULL, 436 "marvell,armada-380-coherency-fabric"); 437 if (!np) 438 return -ENODEV; 439 440 if (broken_idle(np)) 441 goto end; 442 443 of_node_put(np); 444 445 np = of_find_compatible_node(NULL, NULL, 446 "marvell,armada-380-mpcore-soc-ctrl"); 447 if (!np) 448 return -ENODEV; 449 mpsoc_base = of_iomap(np, 0); 450 BUG_ON(!mpsoc_base); 451 452 /* Set up reset mask when powering down the cpus */ 453 reg = readl(mpsoc_base + MPCORE_RESET_CTL); 454 reg |= MPCORE_RESET_CTL_L2; 455 reg |= MPCORE_RESET_CTL_DEBUG; 456 writel(reg, mpsoc_base + MPCORE_RESET_CTL); 457 iounmap(mpsoc_base); 458 459 /* Set up delay */ 460 reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY); 461 reg &= ~PMSU_POWERDOWN_DELAY_MASK; 462 reg |= PMSU_DFLT_ARMADA38X_DELAY; 463 reg |= PMSU_POWERDOWN_DELAY_PMU; 464 writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY); 465 466 mvebu_cpu_resume = armada_38x_cpu_resume; 467 mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend; 468 mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x"; 469 470 end: 471 of_node_put(np); 472 return 0; 473 } 474 475 static __init int armada_xp_cpuidle_init(void) 476 { 477 struct device_node *np; 478 479 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 480 if (!np) 481 return -ENODEV; 482 483 if (broken_idle(np)) 484 goto end; 485 486 mvebu_cpu_resume = armada_370_xp_cpu_resume; 487 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 488 mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp"; 489 490 end: 491 of_node_put(np); 492 return 0; 493 } 494 495 static int __init mvebu_v7_cpu_pm_init(void) 496 { 497 struct device_node *np; 498 int ret; 499 500 np = of_find_matching_node(NULL, of_pmsu_table); 501 if (!np) 502 return 0; 503 of_node_put(np); 504 505 /* 506 * Currently the CPU idle support for Armada 38x is broken, as 507 * the CPU hotplug uses some of the CPU idle functions it is 508 * broken too, so let's disable it 509 */ 510 if (of_machine_is_compatible("marvell,armada380")) { 511 cpu_hotplug_disable(); 512 pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n"); 513 } 514 515 if (of_machine_is_compatible("marvell,armadaxp")) 516 ret = armada_xp_cpuidle_init(); 517 else if (of_machine_is_compatible("marvell,armada370")) 518 ret = armada_370_cpuidle_init(); 519 else if (of_machine_is_compatible("marvell,armada380")) 520 ret = armada_38x_cpuidle_init(); 521 else 522 return 0; 523 524 if (ret) 525 return ret; 526 527 mvebu_v7_pmsu_enable_l2_powerdown_onidle(); 528 if (mvebu_v7_cpuidle_device.name) 529 platform_device_register(&mvebu_v7_cpuidle_device); 530 cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier); 531 532 return 0; 533 } 534 535 arch_initcall(mvebu_v7_cpu_pm_init); 536 early_initcall(mvebu_v7_pmsu_init); 537 538 static void mvebu_pmsu_dfs_request_local(void *data) 539 { 540 u32 reg; 541 u32 cpu = smp_processor_id(); 542 unsigned long flags; 543 544 local_irq_save(flags); 545 546 /* Prepare to enter idle */ 547 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 548 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 549 PMSU_STATUS_AND_MASK_IRQ_MASK | 550 PMSU_STATUS_AND_MASK_FIQ_MASK; 551 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 552 553 /* Request the DFS transition */ 554 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 555 reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ; 556 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 557 558 /* The fact of entering idle will trigger the DFS transition */ 559 wfi(); 560 561 /* 562 * We're back from idle, the DFS transition has completed, 563 * clear the idle wait indication. 564 */ 565 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 566 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 567 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 568 569 local_irq_restore(flags); 570 } 571 572 int mvebu_pmsu_dfs_request(int cpu) 573 { 574 unsigned long timeout; 575 int hwcpu = cpu_logical_map(cpu); 576 u32 reg; 577 578 /* Clear any previous DFS DONE event */ 579 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 580 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE; 581 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 582 583 /* Mask the DFS done interrupt, since we are going to poll */ 584 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 585 reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 586 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 587 588 /* Trigger the DFS on the appropriate CPU */ 589 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local, 590 NULL, false); 591 592 /* Poll until the DFS done event is generated */ 593 timeout = jiffies + HZ; 594 while (time_before(jiffies, timeout)) { 595 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 596 if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE) 597 break; 598 udelay(10); 599 } 600 601 if (time_after(jiffies, timeout)) 602 return -ETIME; 603 604 /* Restore the DFS mask to its original state */ 605 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 606 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 607 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 608 609 return 0; 610 } 611 612 struct cpufreq_dt_platform_data cpufreq_dt_pd = { 613 .independent_clocks = true, 614 }; 615 616 static int __init armada_xp_pmsu_cpufreq_init(void) 617 { 618 struct device_node *np; 619 struct resource res; 620 int ret, cpu; 621 622 if (!of_machine_is_compatible("marvell,armadaxp")) 623 return 0; 624 625 /* 626 * In order to have proper cpufreq handling, we need to ensure 627 * that the Device Tree description of the CPU clock includes 628 * the definition of the PMU DFS registers. If not, we do not 629 * register the clock notifier and the cpufreq driver. This 630 * piece of code is only for compatibility with old Device 631 * Trees. 632 */ 633 np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock"); 634 if (!np) 635 return 0; 636 637 ret = of_address_to_resource(np, 1, &res); 638 if (ret) { 639 pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n"); 640 of_node_put(np); 641 return 0; 642 } 643 644 of_node_put(np); 645 646 /* 647 * For each CPU, this loop registers the operating points 648 * supported (which are the nominal CPU frequency and half of 649 * it), and registers the clock notifier that will take care 650 * of doing the PMSU part of a frequency transition. 651 */ 652 for_each_possible_cpu(cpu) { 653 struct device *cpu_dev; 654 struct clk *clk; 655 int ret; 656 657 cpu_dev = get_cpu_device(cpu); 658 if (!cpu_dev) { 659 pr_err("Cannot get CPU %d\n", cpu); 660 continue; 661 } 662 663 clk = clk_get(cpu_dev, 0); 664 if (IS_ERR(clk)) { 665 pr_err("Cannot get clock for CPU %d\n", cpu); 666 return PTR_ERR(clk); 667 } 668 669 /* 670 * In case of a failure of dev_pm_opp_add(), we don't 671 * bother with cleaning up the registered OPP (there's 672 * no function to do so), and simply cancel the 673 * registration of the cpufreq device. 674 */ 675 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0); 676 if (ret) { 677 clk_put(clk); 678 return ret; 679 } 680 681 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0); 682 if (ret) { 683 clk_put(clk); 684 return ret; 685 } 686 } 687 688 platform_device_register_data(NULL, "cpufreq-dt", -1, 689 &cpufreq_dt_pd, sizeof(cpufreq_dt_pd)); 690 return 0; 691 } 692 693 device_initcall(armada_xp_pmsu_cpufreq_init); 694