1 /* 2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Yehuda Yitschak <yehuday@marvell.com> 7 * Gregory Clement <gregory.clement@free-electrons.com> 8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 9 * 10 * This file is licensed under the terms of the GNU General Public 11 * License version 2. This program is licensed "as is" without any 12 * warranty of any kind, whether express or implied. 13 * 14 * The Armada 370 and Armada XP SOCs have a power management service 15 * unit which is responsible for powering down and waking up CPUs and 16 * other SOC units 17 */ 18 19 #define pr_fmt(fmt) "mvebu-pmsu: " fmt 20 21 #include <linux/clk.h> 22 #include <linux/cpu_pm.h> 23 #include <linux/delay.h> 24 #include <linux/init.h> 25 #include <linux/io.h> 26 #include <linux/kernel.h> 27 #include <linux/mbus.h> 28 #include <linux/of_address.h> 29 #include <linux/of_device.h> 30 #include <linux/platform_device.h> 31 #include <linux/resource.h> 32 #include <linux/slab.h> 33 #include <linux/smp.h> 34 #include <asm/cacheflush.h> 35 #include <asm/cp15.h> 36 #include <asm/smp_scu.h> 37 #include <asm/smp_plat.h> 38 #include <asm/suspend.h> 39 #include <asm/tlbflush.h> 40 #include "common.h" 41 42 43 #define PMSU_BASE_OFFSET 0x100 44 #define PMSU_REG_SIZE 0x1000 45 46 /* PMSU MP registers */ 47 #define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104) 48 #define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18) 49 #define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16) 50 #define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20) 51 52 #define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108) 53 54 #define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0) 55 56 #define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c) 57 #define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16) 58 #define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17) 59 #define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20) 60 #define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21) 61 #define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22) 62 #define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24) 63 #define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25) 64 65 #define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120) 66 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1) 67 #define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17) 68 69 #define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124) 70 71 /* PMSU fabric registers */ 72 #define L2C_NFABRIC_PM_CTL 0x4 73 #define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20) 74 75 /* PMSU delay registers */ 76 #define PMSU_POWERDOWN_DELAY 0xF04 77 #define PMSU_POWERDOWN_DELAY_PMU BIT(1) 78 #define PMSU_POWERDOWN_DELAY_MASK 0xFFFE 79 #define PMSU_DFLT_ARMADA38X_DELAY 0x64 80 81 /* CA9 MPcore SoC Control registers */ 82 83 #define MPCORE_RESET_CTL 0x64 84 #define MPCORE_RESET_CTL_L2 BIT(0) 85 #define MPCORE_RESET_CTL_DEBUG BIT(16) 86 87 #define SRAM_PHYS_BASE 0xFFFF0000 88 #define BOOTROM_BASE 0xFFF00000 89 #define BOOTROM_SIZE 0x100000 90 91 #define ARMADA_370_CRYPT0_ENG_TARGET 0x9 92 #define ARMADA_370_CRYPT0_ENG_ATTR 0x1 93 94 extern void ll_disable_coherency(void); 95 extern void ll_enable_coherency(void); 96 97 extern void armada_370_xp_cpu_resume(void); 98 extern void armada_38x_cpu_resume(void); 99 100 static phys_addr_t pmsu_mp_phys_base; 101 static void __iomem *pmsu_mp_base; 102 103 static void *mvebu_cpu_resume; 104 105 static const struct of_device_id of_pmsu_table[] = { 106 { .compatible = "marvell,armada-370-pmsu", }, 107 { .compatible = "marvell,armada-370-xp-pmsu", }, 108 { .compatible = "marvell,armada-380-pmsu", }, 109 { /* end of list */ }, 110 }; 111 112 void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) 113 { 114 writel(virt_to_phys(boot_addr), pmsu_mp_base + 115 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); 116 } 117 118 extern unsigned char mvebu_boot_wa_start; 119 extern unsigned char mvebu_boot_wa_end; 120 121 /* 122 * This function sets up the boot address workaround needed for SMP 123 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the 124 * BootROM Mbus window, and instead remaps a crypto SRAM into which a 125 * custom piece of code is copied to replace the problematic BootROM. 126 */ 127 int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target, 128 unsigned int crypto_eng_attribute, 129 phys_addr_t resume_addr_reg) 130 { 131 void __iomem *sram_virt_base; 132 u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start; 133 134 mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE); 135 mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute, 136 SRAM_PHYS_BASE, SZ_64K); 137 138 sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K); 139 if (!sram_virt_base) { 140 pr_err("Unable to map SRAM to setup the boot address WA\n"); 141 return -ENOMEM; 142 } 143 144 memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len); 145 146 /* 147 * The last word of the code copied in SRAM must contain the 148 * physical base address of the PMSU register. We 149 * intentionally store this address in the native endianness 150 * of the system. 151 */ 152 __raw_writel((unsigned long)resume_addr_reg, 153 sram_virt_base + code_len - 4); 154 155 iounmap(sram_virt_base); 156 157 return 0; 158 } 159 160 static int __init mvebu_v7_pmsu_init(void) 161 { 162 struct device_node *np; 163 struct resource res; 164 int ret = 0; 165 166 np = of_find_matching_node(NULL, of_pmsu_table); 167 if (!np) 168 return 0; 169 170 pr_info("Initializing Power Management Service Unit\n"); 171 172 if (of_address_to_resource(np, 0, &res)) { 173 pr_err("unable to get resource\n"); 174 ret = -ENOENT; 175 goto out; 176 } 177 178 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) { 179 pr_warn(FW_WARN "deprecated pmsu binding\n"); 180 res.start = res.start - PMSU_BASE_OFFSET; 181 res.end = res.start + PMSU_REG_SIZE - 1; 182 } 183 184 if (!request_mem_region(res.start, resource_size(&res), 185 np->full_name)) { 186 pr_err("unable to request region\n"); 187 ret = -EBUSY; 188 goto out; 189 } 190 191 pmsu_mp_phys_base = res.start; 192 193 pmsu_mp_base = ioremap(res.start, resource_size(&res)); 194 if (!pmsu_mp_base) { 195 pr_err("unable to map registers\n"); 196 release_mem_region(res.start, resource_size(&res)); 197 ret = -ENOMEM; 198 goto out; 199 } 200 201 out: 202 of_node_put(np); 203 return ret; 204 } 205 206 static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void) 207 { 208 u32 reg; 209 210 if (pmsu_mp_base == NULL) 211 return; 212 213 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */ 214 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL); 215 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN; 216 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL); 217 } 218 219 enum pmsu_idle_prepare_flags { 220 PMSU_PREPARE_NORMAL = 0, 221 PMSU_PREPARE_DEEP_IDLE = BIT(0), 222 PMSU_PREPARE_SNOOP_DISABLE = BIT(1), 223 }; 224 225 /* No locking is needed because we only access per-CPU registers */ 226 static int mvebu_v7_pmsu_idle_prepare(unsigned long flags) 227 { 228 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 229 u32 reg; 230 231 if (pmsu_mp_base == NULL) 232 return -EINVAL; 233 234 /* 235 * Adjust the PMSU configuration to wait for WFI signal, enable 236 * IRQ and FIQ as wakeup events, set wait for snoop queue empty 237 * indication and mask IRQ and FIQ from CPU 238 */ 239 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 240 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 241 PMSU_STATUS_AND_MASK_IRQ_WAKEUP | 242 PMSU_STATUS_AND_MASK_FIQ_WAKEUP | 243 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT | 244 PMSU_STATUS_AND_MASK_IRQ_MASK | 245 PMSU_STATUS_AND_MASK_FIQ_MASK; 246 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 247 248 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 249 /* ask HW to power down the L2 Cache if needed */ 250 if (flags & PMSU_PREPARE_DEEP_IDLE) 251 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 252 253 /* request power down */ 254 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ; 255 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 256 257 if (flags & PMSU_PREPARE_SNOOP_DISABLE) { 258 /* Disable snoop disable by HW - SW is taking care of it */ 259 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 260 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP; 261 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 262 } 263 264 return 0; 265 } 266 267 int armada_370_xp_pmsu_idle_enter(unsigned long deepidle) 268 { 269 unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE; 270 int ret; 271 272 if (deepidle) 273 flags |= PMSU_PREPARE_DEEP_IDLE; 274 275 ret = mvebu_v7_pmsu_idle_prepare(flags); 276 if (ret) 277 return ret; 278 279 v7_exit_coherency_flush(all); 280 281 ll_disable_coherency(); 282 283 dsb(); 284 285 wfi(); 286 287 /* If we are here, wfi failed. As processors run out of 288 * coherency for some time, tlbs might be stale, so flush them 289 */ 290 local_flush_tlb_all(); 291 292 ll_enable_coherency(); 293 294 /* Test the CR_C bit and set it if it was cleared */ 295 asm volatile( 296 "mrc p15, 0, r0, c1, c0, 0 \n\t" 297 "tst r0, %0 \n\t" 298 "orreq r0, r0, #(1 << 2) \n\t" 299 "mcreq p15, 0, r0, c1, c0, 0 \n\t" 300 "isb " 301 : : "Ir" (CR_C) : "r0"); 302 303 pr_debug("Failed to suspend the system\n"); 304 305 return 0; 306 } 307 308 static int armada_370_xp_cpu_suspend(unsigned long deepidle) 309 { 310 return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter); 311 } 312 313 int armada_38x_do_cpu_suspend(unsigned long deepidle) 314 { 315 unsigned long flags = 0; 316 317 if (deepidle) 318 flags |= PMSU_PREPARE_DEEP_IDLE; 319 320 mvebu_v7_pmsu_idle_prepare(flags); 321 /* 322 * Already flushed cache, but do it again as the outer cache 323 * functions dirty the cache with spinlocks 324 */ 325 v7_exit_coherency_flush(louis); 326 327 scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF); 328 329 cpu_do_idle(); 330 331 return 1; 332 } 333 334 static int armada_38x_cpu_suspend(unsigned long deepidle) 335 { 336 return cpu_suspend(false, armada_38x_do_cpu_suspend); 337 } 338 339 /* No locking is needed because we only access per-CPU registers */ 340 void mvebu_v7_pmsu_idle_exit(void) 341 { 342 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 343 u32 reg; 344 345 if (pmsu_mp_base == NULL) 346 return; 347 /* cancel ask HW to power down the L2 Cache if possible */ 348 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 349 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 350 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 351 352 /* cancel Enable wakeup events and mask interrupts */ 353 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 354 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP); 355 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 356 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT; 357 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK); 358 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 359 } 360 361 static int mvebu_v7_cpu_pm_notify(struct notifier_block *self, 362 unsigned long action, void *hcpu) 363 { 364 if (action == CPU_PM_ENTER) { 365 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 366 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume); 367 } else if (action == CPU_PM_EXIT) { 368 mvebu_v7_pmsu_idle_exit(); 369 } 370 371 return NOTIFY_OK; 372 } 373 374 static struct notifier_block mvebu_v7_cpu_pm_notifier = { 375 .notifier_call = mvebu_v7_cpu_pm_notify, 376 }; 377 378 static struct platform_device mvebu_v7_cpuidle_device; 379 380 static int broken_idle(struct device_node *np) 381 { 382 if (of_property_read_bool(np, "broken-idle")) { 383 pr_warn("CPU idle is currently broken: disabling\n"); 384 return 1; 385 } 386 387 return 0; 388 } 389 390 static __init int armada_370_cpuidle_init(void) 391 { 392 struct device_node *np; 393 phys_addr_t redirect_reg; 394 395 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 396 if (!np) 397 return -ENODEV; 398 399 if (broken_idle(np)) 400 goto end; 401 402 /* 403 * On Armada 370, there is "a slow exit process from the deep 404 * idle state due to heavy L1/L2 cache cleanup operations 405 * performed by the BootROM software". To avoid this, we 406 * replace the restart code of the bootrom by a a simple jump 407 * to the boot address. Then the code located at this boot 408 * address will take care of the initialization. 409 */ 410 redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0); 411 mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET, 412 ARMADA_370_CRYPT0_ENG_ATTR, 413 redirect_reg); 414 415 mvebu_cpu_resume = armada_370_xp_cpu_resume; 416 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 417 mvebu_v7_cpuidle_device.name = "cpuidle-armada-370"; 418 419 end: 420 of_node_put(np); 421 return 0; 422 } 423 424 static __init int armada_38x_cpuidle_init(void) 425 { 426 struct device_node *np; 427 void __iomem *mpsoc_base; 428 u32 reg; 429 430 pr_warn("CPU idle is currently broken on Armada 38x: disabling\n"); 431 return 0; 432 433 np = of_find_compatible_node(NULL, NULL, 434 "marvell,armada-380-coherency-fabric"); 435 if (!np) 436 return -ENODEV; 437 438 if (broken_idle(np)) 439 goto end; 440 441 of_node_put(np); 442 443 np = of_find_compatible_node(NULL, NULL, 444 "marvell,armada-380-mpcore-soc-ctrl"); 445 if (!np) 446 return -ENODEV; 447 mpsoc_base = of_iomap(np, 0); 448 BUG_ON(!mpsoc_base); 449 450 /* Set up reset mask when powering down the cpus */ 451 reg = readl(mpsoc_base + MPCORE_RESET_CTL); 452 reg |= MPCORE_RESET_CTL_L2; 453 reg |= MPCORE_RESET_CTL_DEBUG; 454 writel(reg, mpsoc_base + MPCORE_RESET_CTL); 455 iounmap(mpsoc_base); 456 457 /* Set up delay */ 458 reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY); 459 reg &= ~PMSU_POWERDOWN_DELAY_MASK; 460 reg |= PMSU_DFLT_ARMADA38X_DELAY; 461 reg |= PMSU_POWERDOWN_DELAY_PMU; 462 writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY); 463 464 mvebu_cpu_resume = armada_38x_cpu_resume; 465 mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend; 466 mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x"; 467 468 end: 469 of_node_put(np); 470 return 0; 471 } 472 473 static __init int armada_xp_cpuidle_init(void) 474 { 475 struct device_node *np; 476 477 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 478 if (!np) 479 return -ENODEV; 480 481 if (broken_idle(np)) 482 goto end; 483 484 mvebu_cpu_resume = armada_370_xp_cpu_resume; 485 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 486 mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp"; 487 488 end: 489 of_node_put(np); 490 return 0; 491 } 492 493 static int __init mvebu_v7_cpu_pm_init(void) 494 { 495 struct device_node *np; 496 int ret; 497 498 np = of_find_matching_node(NULL, of_pmsu_table); 499 if (!np) 500 return 0; 501 of_node_put(np); 502 503 /* 504 * Currently the CPU idle support for Armada 38x is broken, as 505 * the CPU hotplug uses some of the CPU idle functions it is 506 * broken too, so let's disable it 507 */ 508 if (of_machine_is_compatible("marvell,armada380")) { 509 cpu_hotplug_disable(); 510 pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n"); 511 } 512 513 if (of_machine_is_compatible("marvell,armadaxp")) 514 ret = armada_xp_cpuidle_init(); 515 else if (of_machine_is_compatible("marvell,armada370")) 516 ret = armada_370_cpuidle_init(); 517 else if (of_machine_is_compatible("marvell,armada380")) 518 ret = armada_38x_cpuidle_init(); 519 else 520 return 0; 521 522 if (ret) 523 return ret; 524 525 mvebu_v7_pmsu_enable_l2_powerdown_onidle(); 526 if (mvebu_v7_cpuidle_device.name) 527 platform_device_register(&mvebu_v7_cpuidle_device); 528 cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier); 529 530 return 0; 531 } 532 533 arch_initcall(mvebu_v7_cpu_pm_init); 534 early_initcall(mvebu_v7_pmsu_init); 535 536 static void mvebu_pmsu_dfs_request_local(void *data) 537 { 538 u32 reg; 539 u32 cpu = smp_processor_id(); 540 unsigned long flags; 541 542 local_irq_save(flags); 543 544 /* Prepare to enter idle */ 545 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 546 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 547 PMSU_STATUS_AND_MASK_IRQ_MASK | 548 PMSU_STATUS_AND_MASK_FIQ_MASK; 549 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 550 551 /* Request the DFS transition */ 552 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 553 reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ; 554 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 555 556 /* The fact of entering idle will trigger the DFS transition */ 557 wfi(); 558 559 /* 560 * We're back from idle, the DFS transition has completed, 561 * clear the idle wait indication. 562 */ 563 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 564 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 565 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 566 567 local_irq_restore(flags); 568 } 569 570 int mvebu_pmsu_dfs_request(int cpu) 571 { 572 unsigned long timeout; 573 int hwcpu = cpu_logical_map(cpu); 574 u32 reg; 575 576 /* Clear any previous DFS DONE event */ 577 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 578 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE; 579 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 580 581 /* Mask the DFS done interrupt, since we are going to poll */ 582 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 583 reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 584 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 585 586 /* Trigger the DFS on the appropriate CPU */ 587 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local, 588 NULL, false); 589 590 /* Poll until the DFS done event is generated */ 591 timeout = jiffies + HZ; 592 while (time_before(jiffies, timeout)) { 593 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 594 if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE) 595 break; 596 udelay(10); 597 } 598 599 if (time_after(jiffies, timeout)) 600 return -ETIME; 601 602 /* Restore the DFS mask to its original state */ 603 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 604 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 605 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 606 607 return 0; 608 } 609