/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_HV_TEMPLATE	PSSCR_ESL | PSSCR_EC | \
				PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				PSSCR_MTL_MASK

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in interrupt stack frame
 */
save_sprs_to_stack:
	/*
	 * Note that all registers, i.e. per-core, per-subcore and per-thread,
	 * are saved here since any thread in the core might wake up first
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
	 * SDR1 here
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
 *	   - Requested STOP state in POWER9
 *
 * Whether to check IRQ_HAPPENED in r4
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to in r5
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system     */
	/* reset interrupt vector in exceptions-64s.S.         */
	/* The reason is that another thread can switch the    */
	/* MMU to a guest context whenever this flag is set    */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,     */
	/* that would potentially cause this thread to start   */
	/* executing instructions from guest memory in         */
	/* hypervisor mode, leading to a host crash or data    */
	/* corruption, or worse.                               */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The branch call below is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_rm_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

/*
 * r3 - requested stop state
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
2:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7			/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;


/*
 * r3 - requested stop state
 */
_GLOBAL(power9_idle_stop)
	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
	or	r4,r4,r3
	mtspr	SPRN_PSSCR, r4
	li	r4, 1
	LOAD_REG_ADDR(r5,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
BEGIN_FTR_SECTION
	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 correspond to Power-Saving Level Status
	 * which indicates the idle state we are waking up from
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER ISA 2.07 or less.
	 * Check if the last bit of HSPRG0 is set. This indicates whether we
	 * are waking up from winkle.
	 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1

	/* Now that we are sure r13 is corrected, load TOC */
	ld	r2,PACATOC(r13);
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking up with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    fastsleep workaround code.
	 * b. In the wake up path, another thread is executing fastsleep
	 *    workaround undo code or resyncing timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7	/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7	/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3 which indicates that we are waking up with at least partial
	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_rm_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_rm_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid