/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	((PSSCR_EC | PSSCR_ESL) >> 16)

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, whether per-core, per-subcore or per-thread,
	 * are saved here, since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
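/*
 * For reference, a sketch of the lock protocol around core_idle_state,
 * in the same C-like pseudo-code style as the winkle pseudo-code later
 * in this file (callers perform the actual acquire with lwarx/stwcx.,
 * branching here with bnel whenever the lock bit is seen set):
 *
 * lock()
 * {
 *	do {
 *		state = core_idle_state;		// lwarx
 *		while (state & LOCK_BIT)		// spin at HMT_LOW
 *			state = core_idle_state;
 *	} while (!store_conditional(state | LOCK_BIT));	// stwcx.
 *	isync();
 * }
 *
 * unlock()
 * {
 *	lwsync();
 *	core_idle_state = state & ~LOCK_BIT;		// plain stw
 * }
 */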
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5	/* Add if winkle */
	andc	r15,r15,r7	/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
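/*
 * A note on the EC/ESL test below: PSSCR_EC (bit 43) and PSSCR_ESL
 * (bit 42) both sit in the upper half of the low 32-bit word of the
 * PSSCR, so PSSCR_EC_ESL_MASK_SHIFTED above is simply that mask
 * shifted right by 16, which lets a single andis. test both fields
 * at once.
 */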
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0	/* Since we didn't lose state, return 0 */

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12,0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
	 * state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7	/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Select the ISA 2.07 idle entry handler and take the common path */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
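/*
 * How the rlwinm in CHECK_HMI_INTERRUPT works: the wake reason lives
 * in SRR1[42:45], i.e. bits 10-13 of the low 32-bit word. Rotating the
 * low word left by 14 (45-31) brings that field down to the low nibble,
 * where the mask (0xf on P8, 0xe for the 3-bit P7 field) isolates it;
 * the value 0xa then matches "hypervisor maintenance" (HMI).
 */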
/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all of its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e., thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes,
	 * we need to left-shift by 3 bits. Thus r4 = i * 8.
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	bl	power9_dd1_recover_paca
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss
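/*
 * The hwthread_state / hwthread_req handshake above pairs with KVM's
 * secondary-thread grab code. Roughly, with full barriers on both
 * sides:
 *
 *	idle thread				KVM
 *	-----------				---
 *	hwthread_state = IN_KERNEL		hwthread_req = 1
 *	sync					sync
 *	if (hwthread_req)			if (hwthread_state == IN_IDLE)
 *		kvm_start_guest()			take thread for guest
 *
 * At least one side must observe the other's store, so this thread
 * cannot be claimed for a guest once it has resumed running host code.
 */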
/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above).
	 */
	blt	cr3,1f
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
1:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

BEGIN_FTR_SECTION_NESTED(71)
	/*
	 * Assume that we are waking up from the same state
	 * as the Requested Level (RL) in the PSSCR,
	 * which is bits 60-63.
	 */
	ld	r5,PACA_REQ_PSSCR(r13)
	rldicl	r5,r5,0,60
FTR_SECTION_ELSE_NESTED(71)
	/*
	 * Bits 0-3 correspond to Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss	/* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt', since that
	 * would indicate we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.	/* loop forever - should never happen */

	blr	/* Waking up without hypervisor state loss */
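/*
 * PSSCR field extraction in pnv_restore_hyp_resource_arch300 above:
 * RL (Requested Level) is the low nibble, PSSCR[60:63], so
 * rldicl r5,r5,0,60 merely masks it; PLS (Power-Saving Level Status)
 * is the top nibble, PSSCR[0:3], so rldicl r5,r5,4,60 first rotates it
 * down to the bottom. POWER9 DD1 does not read PLS at all and instead
 * assumes the wakeup level equals the requested level it saved in
 * PACA_REQ_PSSCR on entry.
 */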
/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither ISA207 nor ISA300 tests to
	 * reach here are the same as the test to restore NVGPRS:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/* Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock
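/*
 * The restore sequence that follows is hierarchical: the first thread
 * to wake in each subcore restores the per-subcore SPRs, the first
 * thread in the core resyncs the timebase and restores the per-core
 * SPRs, and every thread restores its own per-thread state in
 * common_exit. All other threads skip straight to clear_lock.
 */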
first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least
	 * partial hypervisor state loss, to determine if a TIMEBASE RESYNC
	 * is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
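/*
 * The loop below re-installs the bolted segments from the SLB shadow
 * buffer. Roughly, in the pseudo-code style used earlier in this file:
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		esid = be64_to_cpu(slb_shadow->save_area[i].esid);
 *		vsid = be64_to_cpu(slb_shadow->save_area[i].vsid);
 *		if (esid & SLB_ESID_V)
 *			slbmte(vsid, esid);
 *	}
 *
 * Radix hosts have no segments, hence the branch around it above.
 */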
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

BEGIN_FTR_SECTION
	ld	r4,_LPCR(r1)
	mtspr	SPRN_LPCR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr