/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED	((PSSCR_EC | PSSCR_ESL) >> 16)

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note that all registers, whether per-core, per-subcore or
	 * per-thread, are saved here, since any thread in the core
	 * might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not saving
	 * SDR1 here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
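/*
 * For reference, the lock acquire sequence used by the callers of
 * core_idle_lock_held is roughly the following, in illustrative
 * C-style pseudo-code (same convention as the winkle pseudo-code
 * further below):
 *
 *	do {
 *		state = core_idle_state;		// lwarx
 *		if (state & PNV_CORE_IDLE_LOCK_BIT)
 *			core_idle_lock_held();		// spin at HMT_LOW
 *		state |= PNV_CORE_IDLE_LOCK_BIT;
 *	} while (!store_conditional(&core_idle_state, state)); // stwcx.
 *	isync();
 */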
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested PSSCR value in POWER9
 *
 * Address of idle handler to branch to in realmode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then the current thread is the last thread of the core entering
 * sleep. The last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The branch below is
 * patched out during idle-state discovery if the platform does not
 * require the workaround.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
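/*
 * A summary of the PSSCR fields consulted below, per the ISA v3.0
 * definitions this file relies on (illustrative, not exhaustive):
 *	ESL (bit 42)     - Enable State Loss
 *	EC  (bit 43)     - Exit Criterion
 *	RL  (bits 60:63) - Requested Level
 *	PLS (bits 0:3)   - Power-saving Level Status, set by hardware on wakeup
 * With EC=ESL=0 ("stop lite"), no state is lost and execution resumes
 * at the instruction following stop.
 */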
/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi	r3,r3,60	/* r3 = Bits[60:63] = Requested Level (RL) */
	bne	.Lhandle_esl_ec_set
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
	li	r3,0	/* Since we didn't lose state, return 0 */

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter the
	 * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
	/*
	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
	 * state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
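/*
 * For reference, CHECK_HMI_INTERRUPT above decodes the SRR1 wake
 * reason field roughly as follows, in illustrative C-style pseudo-code
 * (the field is SRR1 bits 42:45; only 3 bits wide on POWER7):
 *
 *	wake_reason = (srr1 >> 18) & 0xf;
 *	if (wake_reason == 0xa)		// hypervisor maintenance interrupt
 *		hmi_exception_realmode(NULL);
 */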
/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
 * HSPRG0 will be set to the HSPRG0 value of one of the
 * threads in this core. Thus the value we have in r13
 * may not be this thread's paca pointer.
 *
 * Fortunately, the TIR remains invariant. Since this thread's
 * paca pointer is recorded in all its siblings' pacas, we can
 * correctly recover this thread's paca pointer if we
 * know the index of this thread in the core.
 *
 * This index can be obtained from the TIR.
 *
 * i.e., thread's position in the core = TIR.
 * If this value is i, then this thread's paca is
 * paca->thread_sibling_pacas[i].
 */
power9_dd1_recover_paca:
	mfspr	r4, SPRN_TIR
	/*
	 * Since each entry in thread_sibling_pacas is 8 bytes
	 * we need to left-shift by 3 bits. Thus r4 = i * 8
	 */
	sldi	r4, r4, 3
	/* Get &paca->thread_sibling_pacas[0] in r5 */
	ld	r5, PACA_SIBLING_PACA_PTRS(r13)
	/* Load paca->thread_sibling_pacas[i] into r13 */
	ldx	r13, r4, r5
	SET_PACA(r13)
	/*
	 * Indicate that we have lost NVGPR state
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(70)
	bl	power9_dd1_recover_paca
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss
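/*
 * A rough sketch of the wakeup dispatch above, in illustrative C-style
 * pseudo-code (cr3 encodes SRR1[46:47], the wake state-loss field):
 *
 *	restore_hyp_resources();	// arch207 or arch300 variant
 *	paca->thread_idle_state = PNV_THREAD_RUNNING;
 *	if (hstate->hwthread_req)
 *		kvm_start_guest();
 *	if (((srr1 >> 16) & 3) >= 2)	// at least GPR loss
 *		pnv_wakeup_loss();	// restore NVGPRs from the stack frame
 *	else
 *		pnv_wakeup_noloss();
 */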
/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return back to link.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9, if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above). We also need to set
	 * then clear bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
1:
	/*
	 * POWER ISA v3.0. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

BEGIN_FTR_SECTION_NESTED(71)
	/*
	 * Assume that we are waking up from the same state as the
	 * Requested Level (RL) in the PSSCR, which is in bits 60-63.
	 */
	ld	r5,PACA_REQ_PSSCR(r13)
	rldicl	r5,r5,0,60
FTR_SECTION_ELSE_NESTED(71)
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status (PLS),
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl	r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */

	blr	/* Waking up without hypervisor state loss. */

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or less.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.	/* Should not happen; spin here to catch it */

	blr	/* Waking up without hypervisor state loss */
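/*
 * The arch300 deep-state test above, in illustrative C-style
 * pseudo-code (DD1 cannot trust PLS, so the requested level saved in
 * the paca stands in for it):
 *
 *	rl = dd1 ? paca->requested_psscr & 0xf	// assume wake == request
 *		 : (psscr >> 60) & 0xf;		// PLS field
 *	if (rl >= pnv_first_deep_stop_state)
 *		pnv_wakeup_tb_loss();		// may be full state loss
 */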
/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * In POWER8, called if waking up from fastsleep or winkle.
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is lost,
	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
	 * here are the same as the test to restore NVGPRS:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase, or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 * }
	 *
	 * sleep_wake()
	 * {
	 *	bool first_in_core, first_in_subcore;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *	bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *	first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *	first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *	core_idle_state |= thread_in_core;
	 *
	 *	if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *		core_idle_state |= THREAD_WINKLE_BITS;
	 *	core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *	winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *	core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift the thread bit into the winkle mask, then test if this
	 * thread's winkle bit is set, and remove it from the winkle bits.
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock
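/*
 * From here the restore is hierarchical; in illustrative C-style
 * pseudo-code (SPR groups abbreviated):
 *
 *	if (first_in_subcore && winkle_state_lost)
 *		restore(SDR1, RPR, AMOR);	// per-subcore (POWER8)
 *	if (first_in_core) {
 *		undo_fastsleep_workaround();	// patched per platform
 *		if (state_lost)
 *			opal_resync_timebase();
 *		if (winkle_state_lost)
 *			restore(TSCR, WORC);	// per-core (+PTCR, RPR on P9)
 *	}
 *	unlock(core_idle_state);
 *	if (winkle_state_lost)			// per-thread, in common_exit
 *		restore(SLB, SPURR, PURR, DSCR, WORT, LPCR, ...);
 */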
first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

BEGIN_FTR_SECTION
	ld	r4,_LPCR(r1)
	mtspr	SPRN_LPCR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync
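/*
 * The two exit paths below return to the caller of power7_idle_insn /
 * power9_idle_stop. pnv_wakeup_noloss falls back to pnv_wakeup_loss
 * when NVGPR state was lost; in illustrative C-style pseudo-code:
 *
 *	if (state_lost || paca->nap_state_lost)
 *		pnv_wakeup_loss();	// restore NVGPRs/CR, return via _LINK
 *	else
 *		pnv_wakeup_noloss();	// GPRs intact, return via saved _NIP
 */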
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr