/*
 * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a power management service
 * unit which is responsible for powering down and waking up CPUs and
 * other SOC units.
 */

#define pr_fmt(fmt) "mvebu-pmsu: " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq-dt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"

#define PMSU_BASE_OFFSET	0x100
#define PMSU_REG_SIZE		0x1000

/* PMSU MP registers */
#define PMSU_CONTROL_AND_CONFIG(cpu)		(((cpu) * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)	(((cpu) * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)		(((cpu) * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu)		(((cpu) * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE		BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK	BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)	(((cpu) * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL		0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN	BIT(20)

/* PMSU delay registers */
#define PMSU_POWERDOWN_DELAY		0xF04
#define PMSU_POWERDOWN_DELAY_PMU	BIT(1)
#define PMSU_POWERDOWN_DELAY_MASK	0xFFFE
#define PMSU_DFLT_ARMADA38X_DELAY	0x64

/* CA9 MPcore SoC Control registers */

#define MPCORE_RESET_CTL		0x64
#define MPCORE_RESET_CTL_L2		BIT(0)
#define MPCORE_RESET_CTL_DEBUG		BIT(16)

#define SRAM_PHYS_BASE	0xFFFF0000
#define BOOTROM_BASE	0xFFF00000
#define BOOTROM_SIZE	0x100000

#define ARMADA_370_CRYPT0_ENG_TARGET	0x9
#define ARMADA_370_CRYPT0_ENG_ATTR	0x1
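/*
 * Worked example of the layout above: the per-CPU MP registers use a
 * 0x100 stride, so PMSU_CONTROL_AND_CONFIG(0) is offset 0x104 and
 * PMSU_CONTROL_AND_CONFIG(1) is offset 0x204 within the PMSU window.
 */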
extern void ll_disable_coherency(void);
extern void ll_enable_coherency(void);

extern void armada_370_xp_cpu_resume(void);
extern void armada_38x_cpu_resume(void);

static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

static void *mvebu_cpu_resume;

static const struct of_device_id of_pmsu_table[] = {
	{ .compatible = "marvell,armada-370-pmsu", },
	{ .compatible = "marvell,armada-370-xp-pmsu", },
	{ .compatible = "marvell,armada-380-pmsu", },
	{ /* end of list */ },
};

void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
	writel(virt_to_phys(boot_addr), pmsu_mp_base +
		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}

extern unsigned char mvebu_boot_wa_start;
extern unsigned char mvebu_boot_wa_end;

/*
 * This function sets up the boot address workaround needed for SMP
 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
 * BootROM Mbus window, and instead remaps a crypto SRAM into which a
 * custom piece of code is copied to replace the problematic BootROM.
 */
int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
			     unsigned int crypto_eng_attribute,
			     phys_addr_t resume_addr_reg)
{
	void __iomem *sram_virt_base;
	u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;

	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
				    SRAM_PHYS_BASE, SZ_64K);

	sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
	if (!sram_virt_base) {
		pr_err("Unable to map SRAM to setup the boot address WA\n");
		return -ENOMEM;
	}

	memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);

	/*
	 * The last word of the code copied in SRAM must contain the
	 * physical base address of the PMSU register. We
	 * intentionally store this address in the native endianness
	 * of the system.
	 */
	__raw_writel((unsigned long)resume_addr_reg,
		     sram_virt_base + code_len - 4);

	iounmap(sram_virt_base);

	return 0;
}

static int __init mvebu_v7_pmsu_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret = 0;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;

	pr_info("Initializing Power Management Service Unit\n");

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("unable to get resource\n");
		ret = -ENOENT;
		goto out;
	}

	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
		pr_warn(FW_WARN "deprecated pmsu binding\n");
		res.start = res.start - PMSU_BASE_OFFSET;
		res.end = res.start + PMSU_REG_SIZE - 1;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		ret = -EBUSY;
		goto out;
	}

	pmsu_mp_phys_base = res.start;

	pmsu_mp_base = ioremap(res.start, resource_size(&res));
	if (!pmsu_mp_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		ret = -ENOMEM;
		goto out;
	}

out:
	of_node_put(np);
	return ret;
}

static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
{
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
	reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
	reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
	writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
}

enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,
	PMSU_PREPARE_DEEP_IDLE = BIT(0),
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
};
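/*
 * How these flags are combined in this file: the Armada 370/XP idle
 * path always passes PMSU_PREPARE_SNOOP_DISABLE and adds
 * PMSU_PREPARE_DEEP_IDLE for deep idle, while the Armada 38x path
 * only ever sets PMSU_PREPARE_DEEP_IDLE (never the snoop-disable
 * flag).
 */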
/* No locking is needed because we only access per-CPU registers */
static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return -EINVAL;

	/*
	 * Adjust the PMSU configuration to wait for WFI signal, enable
	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
	 * indication and mask IRQ and FIQ from CPU
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	/* ask HW to power down the L2 Cache if needed */
	if (flags & PMSU_PREPARE_DEEP_IDLE)
		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;

	/* request power down */
	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
		/* Disable the HW-driven snoop disable - SW takes care of it */
		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
	}

	return 0;
}

int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/*
	 * If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
	"tst	r0, #(1 << 2) \n\t"
	"orreq	r0, r0, #(1 << 2) \n\t"
	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
	: : : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}

static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}

int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
	unsigned long flags = 0;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	mvebu_v7_pmsu_idle_prepare(flags);
	/*
	 * Already flushed cache, but do it again as the outer cache
	 * functions dirty the cache with spinlocks
	 */
	v7_exit_coherency_flush(louis);

	scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);

	cpu_do_idle();

	return 1;
}

static int armada_38x_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(0, armada_38x_do_cpu_suspend);
}
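/*
 * A minimal sketch of how the cpuidle driver registered below is
 * expected to use these entry points (pdev, suspend and deepidle are
 * assumptions - the actual driver lives outside this file): the
 * suspend callback passed as platform_data is invoked from the
 * cpuidle ->enter() hook, bracketed by cpu_pm_enter()/cpu_pm_exit(),
 * which is what fires the CPU_PM notifier defined further down:
 *
 *	int (*suspend)(unsigned long) = pdev->dev.platform_data;
 *
 *	cpu_pm_enter();		// notifier programs the resume address
 *	suspend(deepidle);	// e.g. armada_370_xp_cpu_suspend()
 *	cpu_pm_exit();		// notifier undoes the PMSU idle setup
 */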
/* No locking is needed because we only access per-CPU registers */
void mvebu_v7_pmsu_idle_exit(void)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* cancel the request to power down the L2 cache */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	/* cancel the wakeup events, idle waits and interrupt masking */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
		 PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK |
		 PMSU_STATUS_AND_MASK_FIQ_MASK);
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
}

static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
		mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
	} else if (action == CPU_PM_EXIT) {
		mvebu_v7_pmsu_idle_exit();
	}

	return NOTIFY_OK;
}

static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};

static struct platform_device mvebu_v7_cpuidle_device;

static __init int armada_370_cpuidle_init(void)
{
	struct device_node *np;
	phys_addr_t redirect_reg;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * On Armada 370, there is "a slow exit process from the deep
	 * idle state due to heavy L1/L2 cache cleanup operations
	 * performed by the BootROM software". To avoid this, we
	 * replace the restart code of the bootrom by a simple jump
	 * to the boot address. Then the code located at this boot
	 * address will take care of the initialization.
	 */
	redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
	mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
				 ARMADA_370_CRYPT0_ENG_ATTR,
				 redirect_reg);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";

	return 0;
}
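/*
 * For reference, the workaround set up above maps the Armada 370
 * crypto SRAM (MBus target 0x9, attribute 0x1) over the BootROM
 * window, and the last word of the copied code receives the physical
 * address of the CPU0 boot address redirect register, i.e. the PMSU
 * base plus PMSU_BOOT_ADDR_REDIRECT_OFFSET(0) = 0x124.
 */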
static __init int armada_38x_cpuidle_init(void)
{
	struct device_node *np;
	void __iomem *mpsoc_base;
	u32 reg;

	pr_warn("CPU idle is currently broken on Armada 38x: disabling\n");
	return 0;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-mpcore-soc-ctrl");
	if (!np)
		return -ENODEV;
	mpsoc_base = of_iomap(np, 0);
	BUG_ON(!mpsoc_base);
	of_node_put(np);

	/* Set up reset mask when powering down the cpus */
	reg = readl(mpsoc_base + MPCORE_RESET_CTL);
	reg |= MPCORE_RESET_CTL_L2;
	reg |= MPCORE_RESET_CTL_DEBUG;
	writel(reg, mpsoc_base + MPCORE_RESET_CTL);
	iounmap(mpsoc_base);

	/* Set up delay */
	reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
	reg &= ~PMSU_POWERDOWN_DELAY_MASK;
	reg |= PMSU_DFLT_ARMADA38X_DELAY;
	reg |= PMSU_POWERDOWN_DELAY_PMU;
	writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);

	mvebu_cpu_resume = armada_38x_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x";

	return 0;
}

static __init int armada_xp_cpuidle_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";

	return 0;
}

static int __init mvebu_v7_cpu_pm_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;
	of_node_put(np);

	/*
	 * Currently the CPU idle support for Armada 38x is broken; as
	 * CPU hotplug uses some of the CPU idle functions, it is
	 * broken too, so let's disable it as well.
	 */
	if (of_machine_is_compatible("marvell,armada380")) {
		cpu_hotplug_disable();
		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n");
	}

	if (of_machine_is_compatible("marvell,armadaxp"))
		ret = armada_xp_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada370"))
		ret = armada_370_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada380"))
		ret = armada_38x_cpuidle_init();
	else
		return 0;

	if (ret)
		return ret;

	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
	if (mvebu_v7_cpuidle_device.name)
		platform_device_register(&mvebu_v7_cpuidle_device);
	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

	return 0;
}

arch_initcall(mvebu_v7_cpu_pm_init);
early_initcall(mvebu_v7_pmsu_init);
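/*
 * The two helpers below implement the PMSU side of a dynamic
 * frequency scaling (DFS) transition. mvebu_pmsu_dfs_request() is
 * intended to be called from outside this file while a CPU's clock
 * rate is being changed; a minimal caller sketch (cpu is assumed to
 * be the logical CPU whose clock is being changed):
 *
 *	if (mvebu_pmsu_dfs_request(cpu))
 *		pr_err("DFS transition timed out\n");
 */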
static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	u32 cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	local_irq_restore(flags);
}

int mvebu_pmsu_dfs_request(int cpu)
{
	unsigned long timeout;
	int hwcpu = cpu_logical_map(cpu);
	u32 reg;

	/* Clear any previous DFS DONE event */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Mask the DFS done interrupt, since we are going to poll */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Trigger the DFS on the appropriate CPU */
	smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
				 NULL, false);

	/* Poll until the DFS done event is generated */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
		if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
			break;
		udelay(10);
	}

	if (time_after(jiffies, timeout))
		return -ETIME;

	/* Restore the DFS mask to its original state */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	return 0;
}
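/*
 * Example (hypothetical rate): with a nominal CPU clock of 1600 MHz,
 * the cpufreq init below registers two OPPs per CPU, 1600 MHz and
 * 800 MHz, which the cpufreq-dt driver then exposes as the available
 * frequencies.
 */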
static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
	.independent_clocks = true,
};

static int __init armada_xp_pmsu_cpufreq_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret, cpu;

	if (!of_machine_is_compatible("marvell,armadaxp"))
		return 0;

	/*
	 * In order to have proper cpufreq handling, we need to ensure
	 * that the Device Tree description of the CPU clock includes
	 * the definition of the PMU DFS registers. If not, we do not
	 * register the clock notifier and the cpufreq driver. This
	 * piece of code is only for compatibility with old Device
	 * Trees.
	 */
	np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
	if (!np)
		return 0;

	ret = of_address_to_resource(np, 1, &res);
	if (ret) {
		pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
		of_node_put(np);
		return 0;
	}

	of_node_put(np);

	/*
	 * For each CPU, this loop registers the operating points
	 * supported (which are the nominal CPU frequency and half of
	 * it), and registers the clock notifier that will take care
	 * of doing the PMSU part of a frequency transition.
	 */
	for_each_possible_cpu(cpu) {
		struct device *cpu_dev;
		struct clk *clk;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("Cannot get CPU %d\n", cpu);
			continue;
		}

		clk = clk_get(cpu_dev, NULL);
		if (IS_ERR(clk)) {
			pr_err("Cannot get clock for CPU %d\n", cpu);
			return PTR_ERR(clk);
		}

		/*
		 * In case of a failure of dev_pm_opp_add(), we don't
		 * bother with cleaning up the registered OPP (there's
		 * no function to do so), and simply cancel the
		 * registration of the cpufreq device.
		 */
		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
		if (ret) {
			clk_put(clk);
			return ret;
		}

		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
		if (ret) {
			clk_put(clk);
			return ret;
		}
	}

	platform_device_register_data(NULL, "cpufreq-dt", -1,
				      &cpufreq_dt_pd, sizeof(cpufreq_dt_pd));
	return 0;
}

device_initcall(armada_xp_pmsu_cpufreq_init);