/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers (per-core, per-subcore and per-thread) are
	 * saved here, since any thread in the core might wake up first
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their older values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle, which is used
 * only in the context of CPU-Hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
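/*
 * On pre-ISA v3.00 CPUs the blr in the feature section below is left in
 * place, so save_sprs_to_stack returns here; on POWER9 the blr is nop'ed
 * out and execution falls through to power9_save_additional_sprs.
 */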
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR1
	std	r3, STOP_MMCRA(r13)
	std	r4, STOP_MMCR1(r13)

	mfspr	r3, SPRN_MMCR2
	std	r3, STOP_MMCR2(r13)
	blr

power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4
	/* We have already restored PACA_MMCR0 */
	ld	r3, STOP_MMCR1(r13)
	ld	r4, STOP_MMCR2(r13)
	mtspr	SPRN_MMCR1, r3
	mtspr	SPRN_MMCR2, r4
	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9 - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;


	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then the current thread is the last thread of the core entering
 * sleep. The last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The branch call below is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
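/*
 * Rough C-like sketch of power_enter_stop below (for orientation only, not
 * the exact code):
 *
 *	power_enter_stop(psscr)
 *	{
 *		rl = psscr & 0xf;			requested level
 *		if ((psscr & (PSSCR_EC | PSSCR_ESL)) == 0) {
 *			stop();				lite variant
 *			return pnv_wakeup_noloss();	r3 = 0, r12 = 0
 *		}
 *		save MMCR0 to the stack frame;		DD2 PMAO workaround
 *		if (rl < pnv_first_deep_stop_state)
 *			stop();				wakes via system reset
 *		clear this thread's bit in core_idle_state;
 *		save_sprs_to_stack();
 *		stop();					wakes via system reset
 *	}
 */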
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
power_enter_stop_kvm_rm:
	/*
	 * This is currently unused because POWER9 KVM does not have to
	 * gather secondary threads into sibling mode, but the code is
	 * here in case that function is required.
	 *
	 * Tell KVM we're entering idle.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
power_enter_stop:
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0  /* Since we didn't lose state, return 0 */

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
	 * state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */

.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
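/*
 * Note: the requested PSSCR value is stashed in PACA_REQ_PSSCR above because
 * the POWER9 DD1 wakeup path reads it back to work out which stop level was
 * requested (see pnv_restore_hyp_resource_arch300 below).
 */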
/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e., thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm_start_guest_check:
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beqlr
	b	kvm_start_guest
#endif

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	bl	power9_dd1_recover_paca
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	bl	kvm_start_guest_check
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above). We also need to set
	 * then clear bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
1:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from a deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

BEGIN_FTR_SECTION_NESTED(71)
	/*
	 * Assume that we are waking up from the state
	 * same as the Requested Level (RL) in the PSSCR,
	 * which is in bits 60-63
	 */
	ld	r5,PACA_REQ_PSSCR(r13)
	rldicl	r5,r5,0,60
FTR_SECTION_ELSE_NESTED(71)
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status
	 * which indicates the idle state we are waking up from
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is lost,
	 * but not guaranteed -- neither the ISA207 nor the ISA300 test to reach
	 * here is the same as the test to restore NVGPRS
	 * (PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs).
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * Lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    fastsleep workaround code.
	 * b. In the wake up path, another thread is executing fastsleep
	 *    workaround undo code or resyncing timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
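	/*
	 * The code below implements winkle_wake() from the pseudo-code
	 * above: decrement the core winkle count, set all THREAD_WINKLE
	 * bits if every thread was in winkle, then test and clear this
	 * thread's winkle bit, leaving cr4 gt if winkle state was lost
	 * (matching the ISA300 use of cr4).
	 */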
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)
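	/*
	 * The SLB shadow save area holds 16-byte (ESID, VSID) pairs stored
	 * big-endian; LDX_BE loads them correctly on either endianness.
	 * Only entries with SLB_ESID_V set are reinstalled with slbmte.
	 */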
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

/*
 * On POWER9, we can come here on wakeup from a cpuidle stop state.
 * Hence restore the additional SPRs to the saved value.
 *
 * On POWER8, we come here only on winkle. Since winkle is used
 * only in the case of CPU-Hotplug, we don't need to restore
 * the additional SPRs.
 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr