/*
 * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SoCs have a power management service
 * unit which is responsible for powering down and waking up CPUs and
 * other SoC units.
 */

#define pr_fmt(fmt) "mvebu-pmsu: " fmt

#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq-dt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"


#define PMSU_BASE_OFFSET    0x100
#define PMSU_REG_SIZE	    0x1000

/* PMSU MP registers */
#define PMSU_CONTROL_AND_CONFIG(cpu)		((cpu * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)	((cpu * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)		((cpu * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu)		((cpu * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE		BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK	BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)	((cpu * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL		    0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN		BIT(20)

/* PMSU delay registers */
#define PMSU_POWERDOWN_DELAY		    0xF04
#define PMSU_POWERDOWN_DELAY_PMU		BIT(1)
#define PMSU_POWERDOWN_DELAY_MASK		0xFFFE
#define PMSU_DFLT_ARMADA38X_DELAY		0x64

/* CA9 MPcore SoC Control registers */

#define MPCORE_RESET_CTL		    0x64
#define MPCORE_RESET_CTL_L2			BIT(0)
#define MPCORE_RESET_CTL_DEBUG			BIT(16)

#define SRAM_PHYS_BASE	0xFFFF0000
#define BOOTROM_BASE	0xFFF00000
#define BOOTROM_SIZE	0x100000

#define ARMADA_370_CRYPT0_ENG_TARGET	0x9
#define ARMADA_370_CRYPT0_ENG_ATTR	0x1

extern void ll_disable_coherency(void);
extern void ll_enable_coherency(void);

extern void armada_370_xp_cpu_resume(void);
extern void armada_38x_cpu_resume(void);

static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

static void *mvebu_cpu_resume;

static const struct of_device_id of_pmsu_table[] = {
	{ .compatible = "marvell,armada-370-pmsu", },
	{ .compatible = "marvell,armada-370-xp-pmsu", },
	{ .compatible = "marvell,armada-380-pmsu", },
	{ /* end of list */ },
};
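/*
 * The PMSU "MP" registers above are banked per CPU with a 0x100 byte
 * stride: for instance, PMSU_STATUS_AND_MASK(0) lives at offset 0x10c
 * and PMSU_STATUS_AND_MASK(1) at offset 0x20c. The accessors in this
 * file therefore take the *physical* CPU number, obtained through
 * cpu_logical_map(), rather than the logical one.
 */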
void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
	writel(virt_to_phys(boot_addr), pmsu_mp_base +
		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}

extern unsigned char mvebu_boot_wa_start;
extern unsigned char mvebu_boot_wa_end;

/*
 * This function sets up the boot address workaround needed for SMP
 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
 * BootROM Mbus window, and instead remaps a crypto SRAM into which a
 * custom piece of code is copied to replace the problematic BootROM.
 */
int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
			     unsigned int crypto_eng_attribute,
			     phys_addr_t resume_addr_reg)
{
	void __iomem *sram_virt_base;
	u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;

	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
				    SRAM_PHYS_BASE, SZ_64K);

	sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
	if (!sram_virt_base) {
		pr_err("Unable to map SRAM to setup the boot address WA\n");
		return -ENOMEM;
	}

	memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);

	/*
	 * The last word of the code copied in SRAM must contain the
	 * physical base address of the PMSU register. We
	 * intentionally store this address in the native endianness
	 * of the system.
	 */
	__raw_writel((unsigned long)resume_addr_reg,
		     sram_virt_base + code_len - 4);

	iounmap(sram_virt_base);

	return 0;
}
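/*
 * Resulting SRAM layout (a sketch, assuming the workaround stub is the
 * code located between mvebu_boot_wa_start and mvebu_boot_wa_end):
 *
 *	SRAM_PHYS_BASE .. + code_len - 4: relocated workaround code
 *	SRAM_PHYS_BASE + code_len - 4:    physical address of the
 *					  register holding the resume
 *					  address (resume_addr_reg)
 *
 * On wake-up the CPU starts fetching at the old BootROM address, now
 * backed by this SRAM; the stub is expected to load resume_addr_reg
 * from the last word, read the boot address from that register and
 * branch straight to it, skipping the slow BootROM restart path.
 */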
static int __init mvebu_v7_pmsu_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret = 0;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;

	pr_info("Initializing Power Management Service Unit\n");

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("unable to get resource\n");
		ret = -ENOENT;
		goto out;
	}

	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
		pr_warn(FW_WARN "deprecated pmsu binding\n");
		res.start = res.start - PMSU_BASE_OFFSET;
		res.end = res.start + PMSU_REG_SIZE - 1;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		ret = -EBUSY;
		goto out;
	}

	pmsu_mp_phys_base = res.start;

	pmsu_mp_base = ioremap(res.start, resource_size(&res));
	if (!pmsu_mp_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		ret = -ENOMEM;
		goto out;
	}

out:
	of_node_put(np);
	return ret;
}

static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
{
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
	reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
	reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
	writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
}

enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,
	PMSU_PREPARE_DEEP_IDLE = BIT(0),
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
};

/* No locking is needed because we only access per-CPU registers */
static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return -EINVAL;

	/*
	 * Adjust the PMSU configuration to wait for WFI signal, enable
	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
	 * indication and mask IRQ and FIQ from CPU
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	/* ask HW to power down the L2 Cache if needed */
	if (flags & PMSU_PREPARE_DEEP_IDLE)
		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;

	/* request power down */
	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
		/* Disable the HW-driven snoop disable; SW takes care of it */
		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
	}

	return 0;
}

int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/*
	 * If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
	"tst	r0, #(1 << 2) \n\t"
	"orreq	r0, r0, #(1 << 2) \n\t"
	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
	: : : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}

static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}

int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
	unsigned long flags = 0;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	mvebu_v7_pmsu_idle_prepare(flags);
	/*
	 * Already flushed cache, but do it again as the outer cache
	 * functions dirty the cache with spinlocks
	 */
	v7_exit_coherency_flush(louis);

	scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);

	cpu_do_idle();

	return 1;
}

static int armada_38x_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(false, armada_38x_do_cpu_suspend);
}
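/*
 * Sketch of how the suspend entry points above are consumed (assuming
 * the cpuidle-mvebu-v7 driver, which lives outside this file): the
 * function pointer is passed through the platform data of the cpuidle
 * platform device registered below, and the driver presumably calls it
 * from its idle-state enter hook, with deepidle selecting the
 * deep-idle state:
 *
 *	int (*suspend_fn)(unsigned long) = pdev->dev.platform_data;
 *	...
 *	ret = suspend_fn(deepidle);
 */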
/* No locking is needed because we only access per-CPU registers */
void mvebu_v7_pmsu_idle_exit(void)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* cancel the request to power down the L2 cache if it was set */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	/* cancel the wakeup events and unmask IRQ and FIQ */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
}

static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
		mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
	} else if (action == CPU_PM_EXIT) {
		mvebu_v7_pmsu_idle_exit();
	}

	return NOTIFY_OK;
}

static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};

static struct platform_device mvebu_v7_cpuidle_device;

static __init int armada_370_cpuidle_init(void)
{
	struct device_node *np;
	phys_addr_t redirect_reg;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	/*
	 * On Armada 370, there is "a slow exit process from the deep
	 * idle state due to heavy L1/L2 cache cleanup operations
	 * performed by the BootROM software". To avoid this, we
	 * replace the restart code of the bootrom by a simple jump
	 * to the boot address. Then the code located at this boot
	 * address will take care of the initialization.
	 */
	redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
	mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
				 ARMADA_370_CRYPT0_ENG_ATTR,
				 redirect_reg);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";

	return 0;
}

static __init int armada_38x_cpuidle_init(void)
{
	struct device_node *np;
	void __iomem *mpsoc_base;
	u32 reg;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-mpcore-soc-ctrl");
	if (!np)
		return -ENODEV;
	mpsoc_base = of_iomap(np, 0);
	BUG_ON(!mpsoc_base);
	of_node_put(np);

	/* Set up reset mask when powering down the cpus */
	reg = readl(mpsoc_base + MPCORE_RESET_CTL);
	reg |= MPCORE_RESET_CTL_L2;
	reg |= MPCORE_RESET_CTL_DEBUG;
	writel(reg, mpsoc_base + MPCORE_RESET_CTL);
	iounmap(mpsoc_base);

	/* Set up delay */
	reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
	reg &= ~PMSU_POWERDOWN_DELAY_MASK;
	reg |= PMSU_DFLT_ARMADA38X_DELAY;
	reg |= PMSU_POWERDOWN_DELAY_PMU;
	writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);

	mvebu_cpu_resume = armada_38x_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x";

	return 0;
}

static __init int armada_xp_cpuidle_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";

	return 0;
}

static int __init mvebu_v7_cpu_pm_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;
	of_node_put(np);

	if (of_machine_is_compatible("marvell,armadaxp"))
		ret = armada_xp_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada370"))
		ret = armada_370_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada380"))
		ret = armada_38x_cpuidle_init();
	else
		return 0;

	if (ret)
		return ret;

	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
	platform_device_register(&mvebu_v7_cpuidle_device);
	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

	return 0;
}

arch_initcall(mvebu_v7_cpu_pm_init);
early_initcall(mvebu_v7_pmsu_init);
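/*
 * Dynamic frequency scaling (DFS) on Armada XP is driven through the
 * PMSU: the target CPU sets the DFS request bit and enters WFI with
 * the "wait for CPU idle" indication armed; the hardware then changes
 * the CPU frequency while the CPU is idle and wakes it up once the
 * transition is complete. The two functions below implement the two
 * halves of this sequence: the part that must run on the target CPU
 * itself, and the requesting part that may run from any CPU.
 */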
static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	/* use the physical CPU number, as for all other PMSU accesses */
	u32 hw_cpu = cpu_logical_map(smp_processor_id());
	unsigned long flags;

	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	local_irq_restore(flags);
}

int mvebu_pmsu_dfs_request(int cpu)
{
	unsigned long timeout;
	int hwcpu = cpu_logical_map(cpu);
	u32 reg;

	/* Clear any previous DFS DONE event */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Mask the DFS done interrupt, since we are going to poll */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Trigger the DFS on the appropriate CPU */
	smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
				 NULL, false);

	/* Poll until the DFS done event is generated */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
		if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
			break;
		udelay(10);
	}

	/* Check the event itself rather than the clock, so that a
	 * success on the last polling iteration is not misreported
	 * as a timeout.
	 */
	if (!(reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE))
		return -ETIME;

	/* Restore the DFS mask to its original state */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	return 0;
}

static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
	.independent_clocks = true,
};
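/*
 * Usage sketch (assuming the Armada XP CPU clock driver,
 * drivers/clk/mvebu/clk-cpu.c): after reprogramming the clock divider
 * for a given CPU, the clock driver is expected to call
 * mvebu_pmsu_dfs_request() so that the PMSU actually applies the new
 * frequency:
 *
 *	... program the new divider for 'cpu' ...
 *	err = mvebu_pmsu_dfs_request(cpu);
 *	if (err)
 *		return err;
 */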
static int __init armada_xp_pmsu_cpufreq_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret, cpu;

	if (!of_machine_is_compatible("marvell,armadaxp"))
		return 0;

	/*
	 * In order to have proper cpufreq handling, we need to ensure
	 * that the Device Tree description of the CPU clock includes
	 * the definition of the PMU DFS registers. If not, we do not
	 * register the cpufreq device. This piece of code is only for
	 * compatibility with old Device Trees.
	 */
	np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
	if (!np)
		return 0;

	ret = of_address_to_resource(np, 1, &res);
	if (ret) {
		pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
		of_node_put(np);
		return 0;
	}

	of_node_put(np);

	/*
	 * For each CPU, this loop registers the operating points
	 * supported (which are the nominal CPU frequency and half of
	 * it). The PMSU part of a frequency transition is handled
	 * separately, through mvebu_pmsu_dfs_request().
	 */
	for_each_possible_cpu(cpu) {
		struct device *cpu_dev;
		struct clk *clk;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("Cannot get CPU %d\n", cpu);
			continue;
		}

		clk = clk_get(cpu_dev, NULL);
		if (IS_ERR(clk)) {
			pr_err("Cannot get clock for CPU %d\n", cpu);
			return PTR_ERR(clk);
		}

		/*
		 * In case of a failure of dev_pm_opp_add(), we don't
		 * bother with cleaning up the registered OPP (there's
		 * no function to do so), and simply cancel the
		 * registration of the cpufreq device.
		 */
		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
		if (ret) {
			clk_put(clk);
			return ret;
		}

		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
		if (ret) {
			clk_put(clk);
			return ret;
		}
	}

	platform_device_register_data(NULL, "cpufreq-dt", -1,
				      &cpufreq_dt_pd, sizeof(cpufreq_dt_pd));
	return 0;
}

device_initcall(armada_xp_pmsu_cpufreq_init);
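/*
 * Worked example (hypothetical rates): on an Armada XP whose CPUs are
 * nominally clocked at 1600 MHz, the loop above registers two OPPs per
 * CPU, 1600 MHz and 800 MHz. The cpufreq-dt device registered just
 * after then switches between these two operating points, with
 * .independent_clocks = true indicating that each CPU can change
 * frequency independently of the others.
 */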