// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE  0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR 855

static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First stop state levels when SPR and TB loss can occur.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static unsigned long power7_offline_type;
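
/*
 * Deep idle states (winkle on POWER8, stop states that lose full context
 * on POWER9) lose hypervisor SPRs across the idle period.  The register
 * values pushed to firmware below via the OPAL stop-api
 * (opal_slw_set_reg()) are restored by the platform's power management
 * engine on wakeup from such states.
 */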
static int pnv_save_sprs_for_deep_states(void)
{
        int cpu;
        int rc;

        /*
         * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
         * all cpus at boot. Get these register values from the current cpu
         * and use the same values across all cpus.
         */
        uint64_t lpcr_val = mfspr(SPRN_LPCR);
        uint64_t hid0_val = mfspr(SPRN_HID0);
        uint64_t hid1_val = mfspr(SPRN_HID1);
        uint64_t hid4_val = mfspr(SPRN_HID4);
        uint64_t hid5_val = mfspr(SPRN_HID5);
        uint64_t hmeer_val = mfspr(SPRN_HMEER);
        uint64_t msr_val = MSR_IDLE;
        uint64_t psscr_val = pnv_deepest_stop_psscr_val;

        for_each_present_cpu(cpu) {
                uint64_t pir = get_hard_smp_processor_id(cpu);
                uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

                rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
                if (rc != 0)
                        return rc;

                rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
                if (rc != 0)
                        return rc;

                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
                        if (rc)
                                return rc;

                        rc = opal_slw_set_reg(pir,
                                              P9_STOP_SPR_PSSCR, psscr_val);
                        if (rc)
                                return rc;
                }

                /* HIDs are per core registers */
                if (cpu_thread_in_core(cpu) == 0) {

                        rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
                        if (rc != 0)
                                return rc;

                        rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
                        if (rc != 0)
                                return rc;

                        /* Only p8 needs to set extra HID registers */
                        if (!cpu_has_feature(CPU_FTR_ARCH_300)) {

                                rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
                                if (rc != 0)
                                        return rc;

                                rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
                                if (rc != 0)
                                        return rc;

                                rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
                                if (rc != 0)
                                        return rc;
                        }
                }
        }

        return 0;
}

u32 pnv_get_supported_cpuidle_states(void)
{
        return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
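
/*
 * Fastsleep workaround (POWER8):
 *
 * Some POWER8 revisions (advertised by firmware with
 * OPAL_PM_SLEEP_ENABLED_ER1) need an OPAL call around fastsleep entry and
 * exit.  By default the workaround is applied when the last thread of a
 * core enters fastsleep and undone when the first thread of the core
 * wakes up.  Writing 1 to the sysfs file created below, e.g.
 *
 *   echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 *
 * applies the workaround once on all cores and never undoes it, avoiding
 * the OPAL calls on every subsequent fastsleep entry and exit.
 */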
static void pnv_fastsleep_workaround_apply(void *info)
{
        int rc;
        int *err = info;

        rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
                                        OPAL_CONFIG_IDLE_APPLY);
        if (rc)
                *err = 1;
}

static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
                struct device_attribute *attr, const char *buf,
                size_t count)
{
        cpumask_t primary_thread_mask;
        int err = 0;
        u8 val;

        if (kstrtou8(buf, 0, &val) || val != 1)
                return -EINVAL;

        if (fastsleep_workaround_applyonce == 1)
                return count;

        /*
         * fastsleep_workaround_applyonce = 1 implies that the fastsleep
         * workaround needs to be left in 'applied' state on all the cores.
         * Do this by:
         * 1. Disabling the 'undo' workaround in the fastsleep exit path
         * 2. Sending IPIs to all the cores which have at least one online
         *    thread
         * 3. Disabling the 'apply' workaround in the fastsleep entry path
         *
         * There is no need to send an ipi to cores which have all threads
         * offlined, as the last thread of the core entering fastsleep or a
         * deeper state would have applied the workaround.
         */
        power7_fastsleep_workaround_exit = false;

        get_online_cpus();
        primary_thread_mask = cpu_online_cores_map();
        on_each_cpu_mask(&primary_thread_mask,
                         pnv_fastsleep_workaround_apply,
                         &err, 1);
        put_online_cpus();
        if (err) {
                pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply\n");
                goto fail;
        }

        power7_fastsleep_workaround_entry = false;

        fastsleep_workaround_applyonce = 1;

        return count;
fail:
        return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
                   show_fastsleep_workaround_applyonce,
                   store_fastsleep_workaround_applyonce);

static inline void atomic_start_thread_idle(void)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        int thread_nr = cpu_thread_in_core(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;

        clear_bit(thread_nr, state);
}

static inline void atomic_stop_thread_idle(void)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        int thread_nr = cpu_thread_in_core(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;

        set_bit(thread_nr, state);
}

static inline void atomic_lock_thread_idle(void)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;

        while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
                barrier();
}

static inline void atomic_unlock_and_stop_thread_idle(void)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        unsigned long thread = 1UL << cpu_thread_in_core(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;
        u64 s = READ_ONCE(*state);
        u64 new, tmp;

        BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
        BUG_ON(s & thread);

again:
        new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
        tmp = cmpxchg(state, s, new);
        if (unlikely(tmp != s)) {
                s = tmp;
                goto again;
        }
}

static inline void atomic_unlock_thread_idle(void)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;

        BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
        clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}
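
/*
 * The atomic_*_thread_idle() helpers above operate on the idle_state word
 * in the first thread's paca, which is shared by all threads of a core.
 * The low threads_per_core bits track which threads are awake: a thread
 * clears its bit before entering a state-losing idle and sets it again on
 * wakeup.  NR_PNV_CORE_IDLE_LOCK_BIT serializes updates when per-core
 * resources are saved or restored, and (on POWER8) the winkle count field
 * records how many threads have requested winkle so that full core state
 * loss can be detected.
 */
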
/* P7 and P8 */
struct p7_sprs {
        /* per core */
        u64 tscr;
        u64 worc;

        /* per subcore */
        u64 sdr1;
        u64 rpr;

        /* per thread */
        u64 lpcr;
        u64 hfscr;
        u64 fscr;
        u64 purr;
        u64 spurr;
        u64 dscr;
        u64 wort;

        /* per thread SPRs that get lost in shallow states */
        u64 amr;
        u64 iamr;
        u64 amor;
        u64 uamor;
};

static unsigned long power7_idle_insn(unsigned long type)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;
        unsigned long thread = 1UL << cpu_thread_in_core(cpu);
        unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
        unsigned long srr1;
        bool full_winkle;
        struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
        bool sprs_saved = false;
        int rc;

        if (unlikely(type != PNV_THREAD_NAP)) {
                atomic_lock_thread_idle();

                BUG_ON(!(*state & thread));
                *state &= ~thread;

                if (power7_fastsleep_workaround_entry) {
                        if ((*state & core_thread_mask) == 0) {
                                rc = opal_config_cpu_idle_state(
                                                OPAL_CONFIG_IDLE_FASTSLEEP,
                                                OPAL_CONFIG_IDLE_APPLY);
                                BUG_ON(rc);
                        }
                }

                if (type == PNV_THREAD_WINKLE) {
                        sprs.tscr = mfspr(SPRN_TSCR);
                        sprs.worc = mfspr(SPRN_WORC);

                        sprs.sdr1 = mfspr(SPRN_SDR1);
                        sprs.rpr = mfspr(SPRN_RPR);

                        sprs.lpcr = mfspr(SPRN_LPCR);
                        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                                sprs.hfscr = mfspr(SPRN_HFSCR);
                                sprs.fscr = mfspr(SPRN_FSCR);
                        }
                        sprs.purr = mfspr(SPRN_PURR);
                        sprs.spurr = mfspr(SPRN_SPURR);
                        sprs.dscr = mfspr(SPRN_DSCR);
                        sprs.wort = mfspr(SPRN_WORT);

                        sprs_saved = true;

                        /*
                         * Increment winkle counter and set all winkle bits if
                         * all threads are winkling. This allows wakeup side to
                         * distinguish between fast sleep and winkle state
                         * loss. Fast sleep still has to resync the timebase so
                         * this may not be a really big win.
                         */
                        *state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
                        if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
                                        >> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
                                        == threads_per_core)
                                *state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
                        WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
                }

                atomic_unlock_thread_idle();
        }

        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                sprs.amr = mfspr(SPRN_AMR);
                sprs.iamr = mfspr(SPRN_IAMR);
                sprs.amor = mfspr(SPRN_AMOR);
                sprs.uamor = mfspr(SPRN_UAMOR);
        }

        local_paca->thread_idle_state = type;
        srr1 = isa206_idle_insn_mayloss(type); /* go idle */
        local_paca->thread_idle_state = PNV_THREAD_RUNNING;

        WARN_ON_ONCE(!srr1);
        WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
                        /*
                         * We don't need an isync after the mtsprs here because
                         * the upcoming mtmsrd is execution synchronizing.
                         */
                        mtspr(SPRN_AMR, sprs.amr);
                        mtspr(SPRN_IAMR, sprs.iamr);
                        mtspr(SPRN_AMOR, sprs.amor);
                        mtspr(SPRN_UAMOR, sprs.uamor);
                }
        }

        if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
                hmi_exception_realmode(NULL);

        if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
                if (unlikely(type != PNV_THREAD_NAP)) {
                        atomic_lock_thread_idle();
                        if (type == PNV_THREAD_WINKLE) {
                                WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
                                *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
                                *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
                        }
                        atomic_unlock_and_stop_thread_idle();
                }
                return srr1;
        }

        /* HV state loss */
        BUG_ON(type == PNV_THREAD_NAP);

        atomic_lock_thread_idle();

        full_winkle = false;
        if (type == PNV_THREAD_WINKLE) {
                WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
                *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
                if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
                        *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
                        full_winkle = true;
                        BUG_ON(!sprs_saved);
                }
        }

        WARN_ON(*state & thread);

        if ((*state & core_thread_mask) != 0)
                goto core_woken;

        /* Per-core SPRs */
        if (full_winkle) {
                mtspr(SPRN_TSCR, sprs.tscr);
                mtspr(SPRN_WORC, sprs.worc);
        }

        if (power7_fastsleep_workaround_exit) {
                rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
                                                OPAL_CONFIG_IDLE_UNDO);
                BUG_ON(rc);
        }

        /* TB */
        if (opal_resync_timebase() != OPAL_SUCCESS)
                BUG();

core_woken:
        if (!full_winkle)
                goto subcore_woken;

        if ((*state & local_paca->subcore_sibling_mask) != 0)
                goto subcore_woken;

        /* Per-subcore SPRs */
        mtspr(SPRN_SDR1, sprs.sdr1);
        mtspr(SPRN_RPR, sprs.rpr);

subcore_woken:
        /*
         * isync after restoring shared SPRs and before unlocking. Unlock
         * only contains hwsync which does not necessarily do the right
         * thing for SPRs.
         */
        isync();
        atomic_unlock_and_stop_thread_idle();

        /* Fast sleep does not lose SPRs */
        if (!full_winkle)
                return srr1;

        /* Per-thread SPRs */
        mtspr(SPRN_LPCR, sprs.lpcr);
        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                mtspr(SPRN_HFSCR, sprs.hfscr);
                mtspr(SPRN_FSCR, sprs.fscr);
        }
        mtspr(SPRN_PURR, sprs.purr);
        mtspr(SPRN_SPURR, sprs.spurr);
        mtspr(SPRN_DSCR, sprs.dscr);
        mtspr(SPRN_WORT, sprs.wort);

        mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

        /*
         * The SLB has to be restored here, but it sometimes still
         * contains entries, so the __ variant must be used to prevent
         * multi hits.
         */
        __slb_restore_bolted_realmode();

        return srr1;
}
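
/*
 * On POWER8, hardware threads that are offline in Linux can be claimed by
 * KVM to run guest vCPUs.  If such a thread wakes from idle with a guest
 * entry request pending (hwthread_req set), it must branch into the guest
 * via idle_kvm_start_guest() instead of returning to the host offline
 * loop.
 */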
extern unsigned long idle_kvm_start_guest(unsigned long srr1);

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power7_offline(void)
{
        unsigned long srr1;

        mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        /* Tell KVM we're entering idle. */
        /******************************************************/
        /*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
        /* The following store to HSTATE_HWTHREAD_STATE(r13)  */
        /* MUST occur in real mode, i.e. with the MMU off,    */
        /* and the MMU must stay off until we clear this flag */
        /* and test HSTATE_HWTHREAD_REQ(r13) in               */
        /* pnv_powersave_wakeup in this file.                 */
        /* The reason is that another thread can switch the   */
        /* MMU to a guest context whenever this flag is set   */
        /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
        /* that would potentially cause this thread to start  */
        /* executing instructions from guest memory in        */
        /* hypervisor mode, leading to a host crash or data   */
        /* corruption, or worse.                              */
        /******************************************************/
        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

        __ppc64_runlatch_off();
        srr1 = power7_idle_insn(power7_offline_type);
        __ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
        /* Order setting hwthread_state vs. testing hwthread_req */
        smp_mb();
        if (local_paca->kvm_hstate.hwthread_req)
                srr1 = idle_kvm_start_guest(srr1);
#endif

        mtmsr(MSR_KERNEL);

        return srr1;
}
#endif

void power7_idle_type(unsigned long type)
{
        unsigned long srr1;

        if (!prep_irq_for_idle_irqsoff())
                return;

        mtmsr(MSR_IDLE);
        __ppc64_runlatch_off();
        srr1 = power7_idle_insn(type);
        __ppc64_runlatch_on();
        mtmsr(MSR_KERNEL);

        fini_irq_for_idle_irqsoff();
        irq_set_pending_from_srr1(srr1);
}

void power7_idle(void)
{
        if (!powersave_nap)
                return;

        power7_idle_type(PNV_THREAD_NAP);
}

struct p9_sprs {
        /* per core */
        u64 ptcr;
        u64 rpr;
        u64 tscr;
        u64 ldbar;

        /* per thread */
        u64 lpcr;
        u64 hfscr;
        u64 fscr;
        u64 pid;
        u64 purr;
        u64 spurr;
        u64 dscr;
        u64 wort;

        u64 mmcra;
        u32 mmcr0;
        u32 mmcr1;
        u64 mmcr2;

        /* per thread SPRs that get lost in shallow states */
        u64 amr;
        u64 iamr;
        u64 amor;
        u64 uamor;
};
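
/*
 * power9_idle_stop() handles three cases, selected by the requested PSSCR
 * value:
 *
 * 1. EC=ESL=0: no state can be lost, the wakeup is synchronous and
 *    nothing needs to be saved or restored.
 * 2. EC=ESL=1 with a requested level below pnv_first_spr_loss_level: only
 *    the SPRs that can be lost in shallow states (AMR/IAMR/AMOR/UAMOR)
 *    are saved, and restored if SRR1 reports any loss.
 * 3. Requested level at or above pnv_first_spr_loss_level: the full set
 *    of per-thread and per-core SPRs in struct p9_sprs is saved; on
 *    wakeup the first thread of the core restores the per-core SPRs and
 *    resyncs the timebase if the level reached pnv_first_tb_loss_level.
 */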
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
        int cpu = raw_smp_processor_id();
        int first = cpu_first_thread_sibling(cpu);
        unsigned long *state = &paca_ptrs[first]->idle_state;
        unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
        unsigned long srr1;
        unsigned long pls;
        unsigned long mmcr0 = 0;
        struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
        bool sprs_saved = false;

        if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
                /* EC=ESL=0 case */

                BUG_ON(!mmu_on);

                /*
                 * Wake synchronously. SRESET via xscom may still cause
                 * a 0x100 powersave wakeup with SRR1 reason!
                 */
                srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
                if (likely(!srr1))
                        return 0;

                /*
                 * Registers not saved, can't recover!
                 * This would be a hardware bug
                 */
                BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

                goto out;
        }

        /* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
                local_paca->requested_psscr = psscr;
                /* order setting requested_psscr vs testing dont_stop */
                smp_mb();
                if (atomic_read(&local_paca->dont_stop)) {
                        local_paca->requested_psscr = 0;
                        return 0;
                }
        }
#endif

        if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
                /*
                 * POWER9 DD2 can incorrectly set PMAO when waking up
                 * after a state-loss idle. Saving and restoring MMCR0
                 * over idle is a workaround.
                 */
                mmcr0 = mfspr(SPRN_MMCR0);
        }
        if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
                sprs.lpcr = mfspr(SPRN_LPCR);
                sprs.hfscr = mfspr(SPRN_HFSCR);
                sprs.fscr = mfspr(SPRN_FSCR);
                sprs.pid = mfspr(SPRN_PID);
                sprs.purr = mfspr(SPRN_PURR);
                sprs.spurr = mfspr(SPRN_SPURR);
                sprs.dscr = mfspr(SPRN_DSCR);
                sprs.wort = mfspr(SPRN_WORT);

                sprs.mmcra = mfspr(SPRN_MMCRA);
                sprs.mmcr0 = mfspr(SPRN_MMCR0);
                sprs.mmcr1 = mfspr(SPRN_MMCR1);
                sprs.mmcr2 = mfspr(SPRN_MMCR2);

                sprs.ptcr = mfspr(SPRN_PTCR);
                sprs.rpr = mfspr(SPRN_RPR);
                sprs.tscr = mfspr(SPRN_TSCR);
                sprs.ldbar = mfspr(SPRN_LDBAR);

                sprs_saved = true;

                atomic_start_thread_idle();
        }

        sprs.amr = mfspr(SPRN_AMR);
        sprs.iamr = mfspr(SPRN_IAMR);
        sprs.amor = mfspr(SPRN_AMOR);
        sprs.uamor = mfspr(SPRN_UAMOR);

        srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        local_paca->requested_psscr = 0;
#endif

        psscr = mfspr(SPRN_PSSCR);

        WARN_ON_ONCE(!srr1);
        WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

        if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
                unsigned long mmcra;

                /*
                 * We don't need an isync after the mtsprs here because the
                 * upcoming mtmsrd is execution synchronizing.
                 */
                mtspr(SPRN_AMR, sprs.amr);
                mtspr(SPRN_IAMR, sprs.iamr);
                mtspr(SPRN_AMOR, sprs.amor);
                mtspr(SPRN_UAMOR, sprs.uamor);

                /*
                 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
                 * might have been corrupted and needs flushing. We also need
                 * to reload MMCR0 (see mmcr0 comment above).
                 */
                if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
                        asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
                        mtspr(SPRN_MMCR0, mmcr0);
                }

                /*
                 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
                 * to ensure the PMU starts running.
                 */
                mmcra = mfspr(SPRN_MMCRA);
                mmcra |= PPC_BIT(60);
                mtspr(SPRN_MMCRA, mmcra);
                mmcra &= ~PPC_BIT(60);
                mtspr(SPRN_MMCRA, mmcra);
        }

        if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
                hmi_exception_realmode(NULL);

        /*
         * On POWER9, SRR1 bits do not match exactly as expected.
         * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
         * just always test PSSCR for SPR/TB state loss.
         */
        pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
        if (likely(pls < pnv_first_spr_loss_level)) {
                if (sprs_saved)
                        atomic_stop_thread_idle();
                goto out;
        }

        /* HV state loss */
        BUG_ON(!sprs_saved);
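
        /*
         * Take the core idle-state lock so that only one waking thread
         * restores the per-core SPRs and resyncs the timebase.  Threads
         * that find another thread already awake (a core_thread_mask bit
         * set) skip straight to their per-thread restore.
         */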
        atomic_lock_thread_idle();

        if ((*state & core_thread_mask) != 0)
                goto core_woken;

        /* Per-core SPRs */
        mtspr(SPRN_PTCR, sprs.ptcr);
        mtspr(SPRN_RPR, sprs.rpr);
        mtspr(SPRN_TSCR, sprs.tscr);

        if (pls >= pnv_first_tb_loss_level) {
                /* TB loss */
                if (opal_resync_timebase() != OPAL_SUCCESS)
                        BUG();
        }

        /*
         * isync after restoring shared SPRs and before unlocking. Unlock
         * only contains hwsync which does not necessarily do the right
         * thing for SPRs.
         */
        isync();

core_woken:
        atomic_unlock_and_stop_thread_idle();

        /* Per-thread SPRs */
        mtspr(SPRN_LPCR, sprs.lpcr);
        mtspr(SPRN_HFSCR, sprs.hfscr);
        mtspr(SPRN_FSCR, sprs.fscr);
        mtspr(SPRN_PID, sprs.pid);
        mtspr(SPRN_PURR, sprs.purr);
        mtspr(SPRN_SPURR, sprs.spurr);
        mtspr(SPRN_DSCR, sprs.dscr);
        mtspr(SPRN_WORT, sprs.wort);

        mtspr(SPRN_MMCRA, sprs.mmcra);
        mtspr(SPRN_MMCR0, sprs.mmcr0);
        mtspr(SPRN_MMCR1, sprs.mmcr1);
        mtspr(SPRN_MMCR2, sprs.mmcr2);
        mtspr(SPRN_LDBAR, sprs.ldbar);

        mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

        if (!radix_enabled())
                __slb_restore_bolted_realmode();

out:
        if (mmu_on)
                mtmsr(MSR_KERNEL);

        return srr1;
}

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power9_offline_stop(unsigned long psscr)
{
        unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        __ppc64_runlatch_off();
        srr1 = power9_idle_stop(psscr, true);
        __ppc64_runlatch_on();
#else
        /*
         * Tell KVM we're entering idle.
         * This does not have to be done in real mode because the P9 MMU
         * is independent per-thread. Some steppings share radix/hash mode
         * between threads, but in that case KVM has a barrier sync in real
         * mode before and after switching between radix and hash.
         *
         * kvm_start_guest must still be called in real mode though, hence
         * the false argument.
         */
        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

        __ppc64_runlatch_off();
        srr1 = power9_idle_stop(psscr, false);
        __ppc64_runlatch_on();

        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
        /* Order setting hwthread_state vs. testing hwthread_req */
        smp_mb();
        if (local_paca->kvm_hstate.hwthread_req)
                srr1 = idle_kvm_start_guest(srr1);
        mtmsr(MSR_KERNEL);
#endif

        return srr1;
}
#endif
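
/*
 * power9_idle_type() merges the requested state into the current PSSCR
 * contents: psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val.  As an
 * illustration, a mask of 0xf with a value of 0x3 only replaces the
 * Requested Level field, asking for stop level 3 while leaving the
 * remaining PSSCR fields untouched.
 */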
void power9_idle_type(unsigned long stop_psscr_val,
                      unsigned long stop_psscr_mask)
{
        unsigned long psscr;
        unsigned long srr1;

        if (!prep_irq_for_idle_irqsoff())
                return;

        psscr = mfspr(SPRN_PSSCR);
        psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

        __ppc64_runlatch_off();
        srr1 = power9_idle_stop(psscr, true);
        __ppc64_runlatch_on();

        fini_irq_for_idle_irqsoff();

        irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
void power9_idle(void)
{
        power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 mode by asking all other
 * threads not to stop, and sending a message to wake any that are
 * currently in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
        int cpu, cpu0, thr;
        int awake_threads = 1; /* this thread is awake */
        int poke_threads = 0;
        int need_awake = threads_per_core;

        cpu = smp_processor_id();
        cpu0 = cpu & ~(threads_per_core - 1);
        for (thr = 0; thr < threads_per_core; ++thr) {
                if (cpu != cpu0 + thr)
                        atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
        }
        /* order setting dont_stop vs testing requested_psscr */
        smp_mb();
        for (thr = 0; thr < threads_per_core; ++thr) {
                if (!paca_ptrs[cpu0+thr]->requested_psscr)
                        ++awake_threads;
                else
                        poke_threads |= (1 << thr);
        }

        /* If at least 3 threads are awake, the core is in SMT4 already */
        if (awake_threads < need_awake) {
                /* We have to wake some threads; we'll use msgsnd */
                for (thr = 0; thr < threads_per_core; ++thr) {
                        if (poke_threads & (1 << thr)) {
                                ppc_msgsnd_sync();
                                ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
                                           paca_ptrs[cpu0+thr]->hw_cpu_id);
                        }
                }
                /* now spin until at least 3 threads are awake */
                do {
                        for (thr = 0; thr < threads_per_core; ++thr) {
                                if ((poke_threads & (1 << thr)) &&
                                    !paca_ptrs[cpu0+thr]->requested_psscr) {
                                        ++awake_threads;
                                        poke_threads &= ~(1 << thr);
                                }
                        }
                } while (awake_threads < need_awake);
        }
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
        int cpu, cpu0, thr;

        cpu = smp_processor_id();
        cpu0 = cpu & ~(threads_per_core - 1);

        /* clear all the dont_stop flags */
        for (thr = 0; thr < threads_per_core; ++thr) {
                if (cpu != cpu0 + thr)
                        atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
        }
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_HOTPLUG_CPU

void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
        u64 pir = get_hard_smp_processor_id(cpu);

        mtspr(SPRN_LPCR, lpcr_val);

        /*
         * Program the LPCR via stop-api only if the deepest stop state
         * can lose hypervisor context.
         */
        if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
                opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Must be called with interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
        unsigned long srr1;

        __ppc64_runlatch_off();

        if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
                unsigned long psscr;

                psscr = mfspr(SPRN_PSSCR);
                psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
                                                pnv_deepest_stop_psscr_val;
                srr1 = power9_offline_stop(psscr);
        } else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
                srr1 = power7_offline();
        } else {
                /* This is the fallback method. We emulate snooze */
                while (!generic_check_cpu_restart(cpu)) {
                        HMT_low();
                        HMT_very_low();
                }
                srr1 = 0;
                HMT_medium();
        }

        __ppc64_runlatch_on();

        return srr1;
}
#endif
/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *      Bits 0:3  - Power-Saving Level Status (PLS). This field indicates
 *      the lowest power-saving state the thread entered since the stop
 *      instruction was last executed.
 *
 *      Bit 41 - Status Disable (SD)
 *      0 - Shows PLS entries
 *      1 - PLS entries are all 0
 *
 *      Bit 42 - Enable State Loss (ESL)
 *      0 - No state is lost irrespective of other fields
 *      1 - Allows state loss
 *
 *      Bit 43 - Exit Criterion (EC)
 *      0 - Exit from power-save mode on any interrupt
 *      1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *      Bits 44:47 - Power-Saving Level Limit (PSLL)
 *      This limits the power-saving level that can be entered into.
 *
 *      Bits 60:63 - Requested Level (RL)
 *      Used to specify which power-saving level must be entered on
 *      executing the stop instruction
 */

int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
        int err = 0;

        /*
         * psscr_mask == 0xf indicates an older firmware.
         * Set remaining fields of psscr to the default values.
         * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
         */
        if (*psscr_mask == 0xf) {
                *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
                *psscr_mask = PSSCR_HV_DEFAULT_MASK;
                return err;
        }

        /*
         * New firmware is expected to set the psscr_val bits correctly.
         * Validate that the following invariants are correctly maintained by
         * the new firmware.
         * - ESL bit value matches the EC bit value.
         * - ESL bit is set for all the deep stop states.
         */
        if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
                err = ERR_EC_ESL_MISMATCH;
        } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
                   GET_PSSCR_ESL(*psscr_val) == 0) {
                err = ERR_DEEP_STATE_ESL_MISMATCH;
        }

        return err;
}
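
/*
 * Example: firmware that only describes the Requested Level field
 * (psscr_mask == 0xf, say with psscr_val == 0x3) is treated as old
 * firmware; validate_psscr_val_mask() ORs PSSCR_HV_DEFAULT_VAL into the
 * value and widens the mask to PSSCR_HV_DEFAULT_MASK so that the
 * remaining fields get sane defaults.
 */
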
/*
 * pnv_power9_idle_init: Initializes the default idle state, first
 *                       deep idle state and deepest idle state on
 *                       ISA 3.0 CPUs.
 *
 * The idle states are taken from the pnv_idle_states array populated
 * from the device tree by pnv_parse_cpuidle_dt().
 */
static void __init pnv_power9_idle_init(void)
{
        u64 max_residency_ns = 0;
        int i;

        /*
         * pnv_deepest_stop_{val,mask} should be set to values corresponding to
         * the deepest stop state.
         *
         * pnv_default_stop_{val,mask} should be set to values corresponding to
         * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
         */
        pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
        pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
        for (i = 0; i < nr_pnv_idle_states; i++) {
                int err;
                struct pnv_idle_states_t *state = &pnv_idle_states[i];
                u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

                if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
                    (pnv_first_tb_loss_level > psscr_rl))
                        pnv_first_tb_loss_level = psscr_rl;

                if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
                    (pnv_first_spr_loss_level > psscr_rl))
                        pnv_first_spr_loss_level = psscr_rl;

                /*
                 * The idle code does not deal with TB loss occurring
                 * in a shallower state than SPR loss, so force it to
                 * behave like SPRs are lost if TB is lost. POWER9 would
                 * never encounter this, but a POWER8 core would if it
                 * implemented the stop instruction. So this is for forward
                 * compatibility.
                 */
                if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
                    (pnv_first_spr_loss_level > psscr_rl))
                        pnv_first_spr_loss_level = psscr_rl;

                err = validate_psscr_val_mask(&state->psscr_val,
                                              &state->psscr_mask,
                                              state->flags);
                if (err) {
                        report_invalid_psscr_val(state->psscr_val, err);
                        continue;
                }

                state->valid = true;

                if (max_residency_ns < state->residency_ns) {
                        max_residency_ns = state->residency_ns;
                        pnv_deepest_stop_psscr_val = state->psscr_val;
                        pnv_deepest_stop_psscr_mask = state->psscr_mask;
                        pnv_deepest_stop_flag = state->flags;
                        deepest_stop_found = true;
                }

                if (!default_stop_found &&
                    (state->flags & OPAL_PM_STOP_INST_FAST)) {
                        pnv_default_stop_val = state->psscr_val;
                        pnv_default_stop_mask = state->psscr_mask;
                        default_stop_found = true;
                        WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
                }
        }

        if (unlikely(!default_stop_found)) {
                pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
        } else {
                ppc_md.power_save = power9_idle;
                pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx, mask = 0x%016llx\n",
                        pnv_default_stop_val, pnv_default_stop_mask);
        }

        if (unlikely(!deepest_stop_found)) {
                pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n");
        } else {
                pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx, mask = 0x%016llx\n",
                        pnv_deepest_stop_psscr_val,
                        pnv_deepest_stop_psscr_mask);
        }

        pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
                pnv_first_spr_loss_level);

        pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
                pnv_first_tb_loss_level);
}
static void __init pnv_disable_deep_states(void)
{
        /*
         * The stop-api is unable to restore hypervisor
         * resources on wakeup from platform idle states which
         * lose full context. So disable such states.
         */
        supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
        pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
        pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

        if (cpu_has_feature(CPU_FTR_ARCH_300) &&
            (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
                /*
                 * Use the default stop state for CPU-Hotplug
                 * if available.
                 */
                if (default_stop_found) {
                        pnv_deepest_stop_psscr_val = pnv_default_stop_val;
                        pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
                        pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
                                pnv_deepest_stop_psscr_val);
                } else { /* Fallback to snooze loop for CPU-Hotplug */
                        deepest_stop_found = false;
                        pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
                }
        }
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
        int i;

        if (nr_pnv_idle_states < 0) {
                pr_warn("cpuidle-powernv: no idle states found in the DT\n");
                return;
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pnv_power9_idle_init();

        for (i = 0; i < nr_pnv_idle_states; i++)
                supported_cpuidle_states |= pnv_idle_states[i].flags;
}
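
/*
 * The idle states are described by parallel arrays of per-state values
 * under the /ibm,opal/power-mgt device tree node, roughly of the form
 * (contents are illustrative; the real values come from firmware):
 *
 *   ibm,cpu-idle-state-names        = "stop0", "stop1", ...;
 *   ibm,cpu-idle-state-flags        = <...>;
 *   ibm,cpu-idle-state-latencies-ns = <...>;
 *   ibm,cpu-idle-state-residency-ns = <...>;
 *   ibm,cpu-idle-state-psscr        = <...>;      (ISA 3.0 only)
 *   ibm,cpu-idle-state-psscr-mask   = <...>;      (ISA 3.0 only)
 */
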
/*
 * This function parses the device tree and populates all the information
 * into the pnv_idle_states structure. It also sets up nr_pnv_idle_states,
 * which is the number of cpuidle states discovered through the device tree.
 */
static int pnv_parse_cpuidle_dt(void)
{
        struct device_node *np;
        int nr_idle_states, i;
        int rc = 0;
        u32 *temp_u32;
        u64 *temp_u64;
        const char **temp_string;

        np = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!np) {
                pr_warn("opal: PowerMgmt Node not found\n");
                return -ENODEV;
        }
        nr_idle_states = of_property_count_u32_elems(np,
                                                "ibm,cpu-idle-state-flags");

        pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
                                  GFP_KERNEL);
        temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
        temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
        temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);

        if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
                pr_err("Could not allocate memory for dt parsing\n");
                rc = -ENOMEM;
                goto out;
        }

        /* Read flags */
        if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
                                       temp_u32, nr_idle_states)) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
                rc = -EINVAL;
                goto out;
        }
        for (i = 0; i < nr_idle_states; i++)
                pnv_idle_states[i].flags = temp_u32[i];

        /* Read latencies */
        if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
                                       temp_u32, nr_idle_states)) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
                rc = -EINVAL;
                goto out;
        }
        for (i = 0; i < nr_idle_states; i++)
                pnv_idle_states[i].latency_ns = temp_u32[i];

        /* Read residencies */
        if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
                                       temp_u32, nr_idle_states)) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
                rc = -EINVAL;
                goto out;
        }
        for (i = 0; i < nr_idle_states; i++)
                pnv_idle_states[i].residency_ns = temp_u32[i];

        /* For power9 */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                /* Read pm_ctrl_val */
                if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
                                               temp_u64, nr_idle_states)) {
                        pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
                        rc = -EINVAL;
                        goto out;
                }
                for (i = 0; i < nr_idle_states; i++)
                        pnv_idle_states[i].psscr_val = temp_u64[i];

                /* Read pm_ctrl_mask */
                if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
                                               temp_u64, nr_idle_states)) {
                        pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
                        rc = -EINVAL;
                        goto out;
                }
                for (i = 0; i < nr_idle_states; i++)
                        pnv_idle_states[i].psscr_mask = temp_u64[i];
        }

        /*
         * The power8 specific properties ibm,cpu-idle-state-pmicr-mask and
         * ibm,cpu-idle-state-pmicr-val were never used and there is no
         * plan to use them in the near future. Hence, they are not parsed.
         */

        if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
                                          temp_string, nr_idle_states) < 0) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
                rc = -EINVAL;
                goto out;
        }
        for (i = 0; i < nr_idle_states; i++)
                strlcpy(pnv_idle_states[i].name, temp_string[i],
                        PNV_IDLE_NAME_LEN);
        nr_pnv_idle_states = nr_idle_states;
        rc = 0;
out:
        kfree(temp_u32);
        kfree(temp_u64);
        kfree(temp_string);
        return rc;
}

static int __init pnv_init_idle_states(void)
{
        int cpu;
        int rc = 0;

        /* Set up PACA fields */
        for_each_present_cpu(cpu) {
                struct paca_struct *p = paca_ptrs[cpu];

                p->idle_state = 0;
                if (cpu == cpu_first_thread_sibling(cpu))
                        p->idle_state = (1 << threads_per_core) - 1;

                if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
                        /* P7/P8 nap */
                        p->thread_idle_state = PNV_THREAD_RUNNING;
                } else {
                        /* P9 stop */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                        p->requested_psscr = 0;
                        atomic_set(&p->dont_stop, 0);
#endif
                }
        }

        /* In case we error out nr_pnv_idle_states will be zero */
        nr_pnv_idle_states = 0;
        supported_cpuidle_states = 0;

        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                goto out;
        rc = pnv_parse_cpuidle_dt();
        if (rc)
                return rc;
        pnv_probe_idle_states();

        if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
                if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
                        power7_fastsleep_workaround_entry = false;
                        power7_fastsleep_workaround_exit = false;
                } else {
                        /*
                         * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
                         * the workaround is needed to use fastsleep. Provide
                         * sysfs control to choose how this workaround has to
                         * be applied.
                         */
                        device_create_file(cpu_subsys.dev_root,
                                &dev_attr_fastsleep_workaround_applyonce);
                }

                update_subcore_sibling_mask();

                if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
                        ppc_md.power_save = power7_idle;
                        power7_offline_type = PNV_THREAD_NAP;
                }

                if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
                    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
                        power7_offline_type = PNV_THREAD_WINKLE;
                else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
                         (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
                        power7_offline_type = PNV_THREAD_SLEEP;
        }

        if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
                if (pnv_save_sprs_for_deep_states())
                        pnv_disable_deep_states();
        }

out:
        return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);