/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	(PSSCR_EC | PSSCR_ESL) >> 16

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: every register, i.e. per-core, per-subcore and per-thread,
	 * is saved here since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
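
/*
 * Layout note (informational only; asm/cpuidle.h is authoritative): the
 * core_idle_state word packs, from the low bits up, one idle bit per
 * thread (PNV_CORE_IDLE_THREAD_BITS), one "was in winkle" bit per thread
 * a byte above that (PNV_CORE_IDLE_THREAD_WINKLE_BITS, hence the shift by
 * 8 in the wakeup path), a count of threads that entered winkle
 * (PNV_CORE_IDLE_WINKLE_COUNT), and a lock bit in the upper halfword
 * (PNV_CORE_IDLE_LOCK_BIT, tested with @h throughout this file).
 */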

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
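
/*
 * For readability, the sleep/winkle entry bookkeeping above is roughly
 * the following (a sketch only, ignoring the fastsleep workaround path;
 * not part of the build):
 *
 *	do {
 *		state = *core_idle_state;			// lwarx
 *		if (state & PNV_CORE_IDLE_LOCK_BIT)
 *			wait in core_idle_lock_held until unlocked;
 *		if (requested_state == PNV_THREAD_WINKLE)
 *			state += PNV_CORE_IDLE_WINKLE_COUNT;
 *		state &= ~this_thread_mask;
 *		last_thread = !(state & PNV_CORE_IDLE_THREAD_BITS);
 *	} while (!atomic_update(core_idle_state, state));	// stwcx.
 */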

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0  /* Since we didn't lose state, return 0 */

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
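
/*
 * Note on CHECK_HMI_INTERRUPT (informational): it expects the wakeup SRR1
 * value in r12 and a usable PACA in r13. When the wake reason is a
 * hypervisor maintenance interrupt (0xa), it reloads r1/r2 from the PACA
 * and calls hmi_exception_realmode(), which may clobber the volatile
 * registers; r3 is preserved across the call via ORIG_GPR3 on the stack.
 */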

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all its sibling's paca, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e, thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup
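
/*
 * Informational note (double-check against the Power ISA before relying
 * on it): SRR1[46:47] on wakeup encodes how much state was lost in the
 * idle state - 0b01 no state lost, 0b10 some state lost but hypervisor
 * resources maintained, 0b11 some or all hypervisor state lost. The
 * "cmpwi cr3,r11,2" above turns this into the cr3 condition that
 * pnv_powersave_wakeup and the restore paths below test.
 */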

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	bl	power9_dd1_recover_paca
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_idle_insn()/power9_idle_stop() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing.
	 */
	blt	cr3,1f
	PPC_INVALIDATE_ERAT
1:
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

BEGIN_FTR_SECTION_NESTED(71)
	/*
	 * Assume that we are waking up from the same state as the
	 * Requested Level (RL) in the PSSCR, which is bits 60-63.
	 */
	ld	r5,PACA_REQ_PSSCR(r13)
	rldicl	r5,r5,0,60
FTR_SECTION_ELSE_NESTED(71)
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */
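
/*
 * Roadmap for the deep-wakeup path below (derived from the code, for
 * orientation only): pnv_wakeup_tb_loss takes the core_idle_state lock,
 * restores per-subcore state if it is the first thread in the subcore to
 * wake (first_thread_in_subcore), restores per-core state and resyncs the
 * timebase if it is the first thread in the core (first_thread_in_core,
 * timebase_resync), drops the lock (clear_lock), restores per-thread state
 * (common_exit), and finally returns to pnv_powersave_wakeup, which hands
 * off to pnv_wakeup_loss or pnv_wakeup_noloss.
 */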

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is lost,
	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
	 * here are the same as the test to restore NVGPRS:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits.
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock
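
/*
 * Summary of who restores what below (informational, derived from the
 * code): on ISA207 the first thread to wake in each subcore restores the
 * per-subcore SPRs (SDR1, RPR, AMOR); the first thread to wake in the core
 * undoes the fastsleep workaround, resyncs the timebase and restores the
 * per-core SPRs (PTCR/RPR on ISA300, TSCR, WORC) as needed; every thread
 * then restores its own state in common_exit (SLB on hash MMU, SPURR,
 * PURR, DSCR, WORT, cpu_restore, and LPCR on ISA300).
 */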

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

BEGIN_FTR_SECTION
	ld	r4,_LPCR(r1)
	mtspr	SPRN_LPCR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
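
/*
 * Final exit paths (informational): pnv_wakeup_loss rebuilds the
 * non-volatile register state from the interrupt frame saved by
 * pnv_powersave_common, while pnv_wakeup_noloss is taken when
 * PACA_NAPSTATELOST is clear and only needs to restore NIP, CR and the
 * kernel MSR before returning to the caller.
 */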

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_idle_insn()/power9_idle_stop().
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_idle_insn()/power9_idle_stop().
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr