// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR 855

static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First stop state levels when SPR and TB loss can occur.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static unsigned long power7_offline_type;

static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these register values on the current cpu and
	 * use the same values across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only P8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				uint64_t hid1_val = mfspr(SPRN_HID1);
				uint64_t hid4_val = mfspr(SPRN_HID4);
				uint64_t hid5_val = mfspr(SPRN_HID5);

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
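
/*
 * A minimal usage sketch (illustrative, not code copied from the
 * cpuidle driver): a consumer such as drivers/cpuidle/cpuidle-powernv.c
 * can use the accessor above to gate which idle states it registers:
 *
 *	u32 flags = pnv_get_supported_cpuidle_states();
 *
 *	if (flags & OPAL_PM_NAP_ENABLED)
 *		// register the "Nap" cpuidle state
 *	if (flags & OPAL_PM_STOP_INST_FAST)
 *		// register the shallow "stop" states
 */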

static void pnv_fastsleep_workaround_apply(void *info)
{
	int cpu = smp_processor_id();
	int rc;
	int *err = info;

	if (cpu_first_thread_sibling(cpu) != cpu)
		return;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	int err = 0;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in 'applied' state on all the cores.
	 * Do this by:
	 * 1. Disabling the 'undo' workaround in the fastsleep exit path.
	 * 2. Sending IPIs to all the cores which have at least one online
	 *    thread.
	 * 3. Disabling the 'apply' workaround in the fastsleep entry path.
	 *
	 * There is no need to send an IPI to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
	power7_fastsleep_workaround_exit = false;

	cpus_read_lock();
	on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
	cpus_read_unlock();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply\n");
		goto fail;
	}

	power7_fastsleep_workaround_entry = false;

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
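
/*
 * Illustrative userspace interaction, assuming the attribute is created
 * on the cpu subsystem root (as pnv_init_idle_states() below does),
 * which yields /sys/devices/system/cpu/fastsleep_workaround_applyonce:
 *
 *	# echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 *
 * Writing any value other than 1 fails with -EINVAL, and the change is
 * one-way: once the workaround is applied on all cores it is never
 * undone.
 */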

static inline void atomic_start_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	clear_bit(thread_nr, state);
}

static inline void atomic_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	set_bit(thread_nr, state);
}

static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
		barrier();
}

static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		s = tmp;
		goto again;
	}
}

static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}
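
/*
 * Rough layout of the shared idle_state word the helpers above operate
 * on (one word per core, kept in the first thread's paca; asm/cpuidle.h
 * has the authoritative bit definitions):
 *
 *	bits  0..7  - one bit per thread, set while the thread is awake
 *	bits  8..15 - per-thread winkle bits, set when the whole core
 *	              has winkled (PNV_CORE_IDLE_THREAD_WINKLE_BITS)
 *	bits 16..19 - count of threads currently in winkle
 *	bit  28     - lock bit serialising non-atomic updates
 *
 * A minimal sketch of the pattern the idle entry/exit paths use:
 *
 *	atomic_lock_thread_idle();
 *	// read-modify-write of *state
 *	atomic_unlock_thread_idle();  // or atomic_unlock_and_stop_thread_idle()
 */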

/* P7 and P8 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 uamor;
	/* amor is restored to constant ~0 */
};

static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			sprs.tscr = mfspr(SPRN_TSCR);
			sprs.worc = mfspr(SPRN_WORC);

			sprs.sdr1 = mfspr(SPRN_SDR1);
			sprs.rpr = mfspr(SPRN_RPR);

			sprs.lpcr = mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr = mfspr(SPRN_HFSCR);
				sprs.fscr = mfspr(SPRN_FSCR);
			}
			sprs.purr = mfspr(SPRN_PURR);
			sprs.spurr = mfspr(SPRN_SPURR);
			sprs.dscr = mfspr(SPRN_DSCR);
			sprs.wort = mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr = mfspr(SPRN_AMR);
		sprs.iamr = mfspr(SPRN_IAMR);
		sprs.uamor = mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR, sprs.amr);
			mtspr(SPRN_IAMR, sprs.iamr);
			mtspr(SPRN_AMOR, ~0);
			mtspr(SPRN_UAMOR, sprs.uamor);
		}
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR, sprs.tscr);
		mtspr(SPRN_WORC, sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1, sprs.sdr1);
	mtspr(SPRN_RPR, sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR, sprs.hfscr);
		mtspr(SPRN_FSCR, sprs.fscr);
	}
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}
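
/*
 * For reference, the SRR1 wake-state values tested above encode how
 * much state was lost while the thread was idle (see asm/reg.h):
 *
 *	SRR1_WS_NOLOSS  - all resources maintained
 *	SRR1_WS_GPRLOSS - GPRs lost, hypervisor resources maintained
 *	SRR1_WS_HVLOSS  - hypervisor state lost, full restore required
 */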

extern unsigned long idle_kvm_start_guest(unsigned long srr1);

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif

void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}

static void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}
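
/*
 * Sketch of where power7_idle() (and arch300_idle() below) hook in:
 * the powerpc idle loop calls the platform power_save hook when one is
 * registered, roughly:
 *
 *	if (ppc_md.power_save)
 *		ppc_md.power_save();
 *
 * pnv_init_idle_states() at the bottom of this file decides which of
 * the two gets installed.
 */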

struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 ciabr;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

static unsigned long power9_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	unsigned long mmcra = 0;
	struct p9_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * POWER9 DD2 can incorrectly set PMAO when waking up
		 * after a state-loss idle. Saving and restoring MMCR0
		 * over idle is a workaround.
		 */
		mmcr0 = mfspr(SPRN_MMCR0);
	}

	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		sprs.lpcr = mfspr(SPRN_LPCR);
		sprs.hfscr = mfspr(SPRN_HFSCR);
		sprs.fscr = mfspr(SPRN_FSCR);
		sprs.pid = mfspr(SPRN_PID);
		sprs.purr = mfspr(SPRN_PURR);
		sprs.spurr = mfspr(SPRN_SPURR);
		sprs.dscr = mfspr(SPRN_DSCR);
		sprs.ciabr = mfspr(SPRN_CIABR);

		sprs.mmcra = mfspr(SPRN_MMCRA);
		sprs.mmcr0 = mfspr(SPRN_MMCR0);
		sprs.mmcr1 = mfspr(SPRN_MMCR1);
		sprs.mmcr2 = mfspr(SPRN_MMCR2);

		sprs.ptcr = mfspr(SPRN_PTCR);
		sprs.rpr = mfspr(SPRN_RPR);
		sprs.tscr = mfspr(SPRN_TSCR);
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	sprs.amr = mfspr(SPRN_AMR);
	sprs.iamr = mfspr(SPRN_IAMR);
	sprs.uamor = mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR, sprs.amr);
		mtspr(SPRN_IAMR, sprs.iamr);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_UAMOR, sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR, sprs.ptcr);
	mtspr(SPRN_RPR, sprs.rpr);
	mtspr(SPRN_TSCR, sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	mtspr(SPRN_HFSCR, sprs.hfscr);
	mtspr(SPRN_FSCR, sprs.fscr);
	mtspr(SPRN_PID, sprs.pid);
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_CIABR, sprs.ciabr);

	mtspr(SPRN_MMCRA, sprs.mmcra);
	mtspr(SPRN_MMCR0, sprs.mmcr0);
	mtspr(SPRN_MMCR1, sprs.mmcr1);
	mtspr(SPRN_MMCR2, sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 by asking all other threads
 * not to stop, and sending a message to any that are in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
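
/*
 * A minimal usage sketch for the pair above (illustrative): a caller
 * such as the KVM HV transactional-memory workaround code brackets the
 * section that must run with the core in SMT4, with preemption
 * disabled throughout:
 *
 *	preempt_disable();
 *	pnv_power9_force_smt4_catch();
 *	// ... code that must run with the core held in SMT4 ...
 *	pnv_power9_force_smt4_release();
 *	preempt_enable();
 */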

struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1.
	 * The isa300 idle routines restore CR and LR.
	 * CTR is volatile.
	 * The idle thread doesn't use FP or VEC.
	 * The kernel doesn't use TAR.
	 * HSPRG1 is only live in HV interrupt entry.
	 * SPRG2 is only live in KVM guests; KVM handles it.
	 */
};

static unsigned long power10_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}
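
/*
 * Note: the XXX save/restore placeholders above are intentionally
 * empty. There is no deep-state SPR loss driver for POWER10 yet, and
 * pnv_arch300_idle_init() below skips any P10 state flagged with
 * OPAL_PM_TIMEBASE_STOP or OPAL_PM_LOSE_FULL_CONTEXT, so with current
 * firmware these paths are never reached.
 */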

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long arch300_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr);
	else
		srr1 = power9_idle_stop(psscr);

	return srr1;
}
#endif

void arch300_idle_type(unsigned long stop_psscr_val,
		       unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr);
	else
		srr1 = power9_idle_stop(psscr);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_HOTPLUG_CPU

void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Must be called with interrupts hard disabled and no lazy IRQ pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * Power ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction was
 *	last executed.
 *
 *	Bit 41 - Status Disable(SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */
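
/*
 * For example (using the asm/reg.h definitions): a request for stop
 * level 3 with EC=ESL=1 is PSSCR_EC | PSSCR_ESL | 3, since the
 * Requested Level occupies the least significant nibble (PSSCR_RL_MASK).
 * The entry paths in this file apply a state's value/mask pair on top
 * of the current PSSCR like so:
 *
 *	psscr = mfspr(SPRN_PSSCR);
 *	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
 */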

int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}
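
/*
 * Worked example for the older-firmware path above: a device tree that
 * only publishes the requested level, say psscr_val = 0x3 with
 * psscr_mask = 0xf, is widened here to
 * psscr_val = PSSCR_HV_DEFAULT_VAL | 0x3 and
 * psscr_mask = PSSCR_HV_DEFAULT_MASK, so that the remaining PSSCR
 * fields get sane defaults rather than whatever was left in the
 * register.
 */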

/*
 * pnv_arch300_idle_init: Initializes the default idle state, first
 *                        deep idle state and deepest idle state on
 *                        ISA 3.0 CPUs.
 */
static void __init pnv_arch300_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/* stop is not really architected, we only have p9,p10 drivers */
	if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
		return;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	deep_spr_loss_state = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		/* No deep loss driver implemented for POWER10 yet */
		if (pvr_version_is(PVR_POWER10) &&
				state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
			continue;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = arch300_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		deep_spr_loss_state);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}
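
/*
 * Worked example of the selection above (state names and numbers are
 * illustrative, not real firmware data): given a valid shallow state
 * "stop0" with OPAL_PM_STOP_INST_FAST and 10us residency, and a valid
 * deep state "stop4" with OPAL_PM_LOSE_FULL_CONTEXT and 100ms
 * residency, the loop picks stop0 for pnv_default_stop_{val,mask}
 * (first loss-less state found) and stop4 for
 * pnv_deepest_stop_psscr_{val,mask} (largest residency).
 */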

static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
		 */
		if (default_stop_found) {
			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
				pnv_deepest_stop_psscr_val);
		} else { /* Fallback to snooze loop for CPU-Hotplug */
			deepest_stop_found = false;
			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
		}
	}
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	int i;

	if (nr_pnv_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pnv_arch300_idle_init();

	for (i = 0; i < nr_pnv_idle_states; i++)
		supported_cpuidle_states |= pnv_idle_states[i].flags;
}

/*
 * This function parses the device tree and populates all the information
 * into the pnv_idle_states structure. It also sets up nr_pnv_idle_states,
 * which is the number of cpuidle states discovered through the device tree.
 */
static int pnv_parse_cpuidle_dt(void)
{
	struct device_node *np;
	int nr_idle_states, i;
	int rc = 0;
	u32 *temp_u32;
	u64 *temp_u64;
	const char **temp_string;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		return -ENODEV;
	}
	nr_idle_states = of_property_count_u32_elems(np,
						"ibm,cpu-idle-state-flags");

	pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
				  GFP_KERNEL);
	temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
	temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
	temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);

	if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
		pr_err("Could not allocate memory for dt parsing\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Read flags */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].flags = temp_u32[i];

	/* Read latencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].latency_ns = temp_u32[i];

	/* Read residencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].residency_ns = temp_u32[i];

	/* For power9 and later */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* Read pm_ctrl_val */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_val = temp_u64[i];

		/* Read pm_ctrl_mask */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_mask = temp_u64[i];
	}

	/*
	 * The power8 specific properties ibm,cpu-idle-state-pmicr-mask and
	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
	 * plan to use them in the near future. Hence, they are not parsed.
	 */

	if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
					  temp_string, nr_idle_states) < 0) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		strlcpy(pnv_idle_states[i].name, temp_string[i],
			PNV_IDLE_NAME_LEN);
	nr_pnv_idle_states = nr_idle_states;
	rc = 0;
out:
	kfree(temp_u32);
	kfree(temp_u64);
	kfree(temp_string);
	return rc;
}
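
/*
 * Illustrative shape of the firmware node parsed above (device tree
 * syntax; the property values here are placeholders, not real firmware
 * data):
 *
 *	power-mgt {
 *		ibm,cpu-idle-state-flags = <0x100 0x200>;
 *		ibm,cpu-idle-state-latencies-ns = <1000 2000>;
 *		ibm,cpu-idle-state-residency-ns = <10000 20000>;
 *		ibm,cpu-idle-state-names = "stop0", "stop1";
 *		ibm,cpu-idle-state-psscr = /bits/ 64 <0x3 0x3>;       (P9+)
 *		ibm,cpu-idle-state-psscr-mask = /bits/ 64 <0xf 0xf>;  (P9+)
 *	};
 */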

static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else if (pvr_version_is(PVR_POWER9)) {
			/* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * workaround is needed to use fastsleep. Provide sysfs
			 * control to choose how this workaround has to be
			 * applied.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
		    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);