xref: /openbmc/linux/arch/powerpc/kernel/idle_book3s.S (revision ce57c6610cc2d7cde61fc005a2d2090bce46fc73)
/*
 *  This file contains idle entry/exit functions for POWER7,
 *  POWER8 and POWER9 CPUs.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _MMCR0	GPR0
#define _SDR1	GPR3
#define _PTCR	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _LPCR	GPR12

#define PSSCR_EC_ESL_MASK_SHIFTED          (PSSCR_EC | PSSCR_ESL) >> 16
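
/*
 * A note on the mask above (explanatory only): PSSCR_EC (0x00100000)
 * and PSSCR_ESL (0x00200000) sit in the upper halfword of the low
 * 32 bits of the requested PSSCR image, so the mask is pre-shifted
 * right by 16. "andis." shifts its immediate back left by 16, which
 * lets a single instruction test both bits:
 *
 *	andis.	r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
 */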

	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, i.e. per-core, per-subcore and per-thread,
	 * are saved here since any thread in the core might wake up first.
	 */
BEGIN_FTR_SECTION
	/*
	 * Note: SDR1 is dropped in Power ISA v3, hence it is not saved
	 * here.
	 */
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	mfspr	r3,SPRN_LPCR
	std	r3,_LPCR(r1)
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
/*
 * On POWER9, there are idle states such as stop4, invoked via cpuidle,
 * that lose hypervisor resources. In such cases, we need to save
 * additional SPRs before entering those idle states so that they can
 * be restored to their earlier values on wakeup from the idle state.
 *
 * On POWER8, the only such deep idle state is winkle, which is used
 * only in the context of CPU-Hotplug, where these additional SPRs are
 * reinitialized to a sane value. Hence there is no need to save/restore
 * these SPRs.
 */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

power9_save_additional_sprs:
	mfspr	r3, SPRN_PID
	mfspr	r4, SPRN_LDBAR
	std	r3, STOP_PID(r13)
	std	r4, STOP_LDBAR(r13)

	mfspr	r3, SPRN_FSCR
	mfspr	r4, SPRN_HFSCR
	std	r3, STOP_FSCR(r13)
	std	r4, STOP_HFSCR(r13)

	mfspr	r3, SPRN_MMCRA
	mfspr	r4, SPRN_MMCR0
	std	r3, STOP_MMCRA(r13)
	std	r4, _MMCR0(r1)

	mfspr	r3, SPRN_MMCR1
	mfspr	r4, SPRN_MMCR2
	std	r3, STOP_MMCR1(r13)
	std	r4, STOP_MMCR2(r13)
	blr
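
/*
 * Save-area note (a sketch of the layout used above, nothing new):
 * most of these SPRs land in the STOP_* slots of the PACA (r13),
 * which survive state loss, while MMCR0 reuses the _MMCR0 slot in
 * the interrupt stack frame (r1) shared with the winkle path above.
 */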

power9_restore_additional_sprs:
	ld	r3,_LPCR(r1)
	ld	r4, STOP_PID(r13)
	mtspr	SPRN_LPCR,r3
	mtspr	SPRN_PID, r4

	ld	r3, STOP_LDBAR(r13)
	ld	r4, STOP_FSCR(r13)
	mtspr	SPRN_LDBAR, r3
	mtspr	SPRN_FSCR, r4

	ld	r3, STOP_HFSCR(r13)
	ld	r4, STOP_MMCRA(r13)
	mtspr	SPRN_HFSCR, r3
	mtspr	SPRN_MMCRA, r4

	ld	r3, _MMCR0(r1)
	ld	r4, STOP_MMCR1(r13)
	mtspr	SPRN_MMCR0, r3
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	mtspr	SPRN_MMCR2, r3
	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andis.	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bne-	core_idle_lock_held
	blr
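
/*
 * In rough C, what the helper above does (a sketch only; the real code
 * returns with the lwarx reservation still live, so the caller's
 * stwcx. completes the update):
 *
 *	do {
 *		while (*core_idle_state & PNV_CORE_IDLE_LOCK_BIT)
 *			;			// spin at HMT_LOW priority
 *		val = lwarx(core_idle_state);	// starts a reservation
 *	} while (val & PNV_CORE_IDLE_LOCK_BIT);
 *	return val;				// in r15
 */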

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE on POWER8
 *	   - Requested PSSCR value on POWER9
 *
 * Address of idle handler to branch to in real mode in r4
 */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * Nap is a state loss, so we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mtctr	r4

	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r5
	std	r5,_CCR(r1)
	std	r1,PACAR1(r13)
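
	/*
	 * What the frame setup above amounts to, in rough C (a sketch;
	 * the field names follow the _LINK/_NIP/_CCR offsets used here):
	 *
	 *	frame = r1 - INT_FRAME_SIZE;
	 *	frame->link = frame->nip = return_address;	// from LR
	 *	save r2 and r14..r31 into frame;	// SAVE_GPR/SAVE_NVGPRS
	 *	frame->ccr = CR;
	 *	paca->saved_r1 = frame;			// PACAR1
	 */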

BEGIN_FTR_SECTION
	/*
	 * POWER9 does not require real mode to stop, and presently does not
	 * set hwthread_state for KVM (threads don't share MMU context), so
	 * we can remain in virtual mode for this.
	 */
	bctr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/*
	 * POWER8
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	mtmsrd	r7,0
	bctr

/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
236:	cmpd	cr0,r0,r0;					\
	bne	236b;						\
	IDLE_INST;
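
/*
 * Why the sequence above (explanatory note): the store, ptesync and
 * dependent load/compare loop ensure prior stores have been performed
 * before the idle instruction executes, per the architected
 * power-saving mode entry sequence; the "bne" can never be taken,
 * since cmpd of a register against itself always sets eq.
 */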


	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	li	r5,0
	beq	cr3,3f
	lis	r5,PNV_CORE_IDLE_WINKLE_COUNT@h
3:
lwarx_loop1:
	lwarx	r15,0,r14

	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held

	add	r15,r15,r5			/* Add if winkle */
	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, the current thread is the last thread of the core entering
 * sleep. The last thread needs to execute the hardware bug workaround code
 * if the platform requires it. The workaround call is made unconditionally
 * here; the branch below is patched out during idle-state discovery if the
 * platform does not require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)

fastsleep_workaround_at_entry:
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_config_cpu_idle_state

	/* Unlock */
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)
	b	common_enter
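
/*
 * Shape of the workaround path above, in rough C (a sketch; the
 * argument meanings -- first argument 1 = fastsleep, second argument
 * 1 = apply, 0 = undo -- are inferred from this call and its partner
 * in fastsleep_workaround_at_exit):
 *
 *	core_idle_state |= PNV_CORE_IDLE_LOCK_BIT;	// stwcx. above
 *	opal_config_cpu_idle_state(1, 1);		// apply workaround
 *	core_idle_state &= ~PNV_CORE_IDLE_LOCK_BIT;	// lwsync + stw
 */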

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)

/*
 * r3 - PSSCR value corresponding to the requested stop state.
 */
power_enter_stop:
/*
 * Check if we are executing the lite variant with ESL=EC=0
 */
	andis.   r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
	clrldi   r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
	bne	 .Lhandle_esl_ec_set
	PPC_STOP
	li	r3,0  /* Since we didn't lose state, return 0 */
	std	r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
	 * it can determine if the wakeup reason is an HMI in
	 * CHECK_HMI_INTERRUPT.
	 *
	 * However, when we wake up with ESL=0, SRR1 will not contain the
	 * wakeup reason, so there is no point setting r12 to SRR1.
	 *
	 * Further, we clear r12 here, so that we don't accidentally enter
	 * the HMI path in pnv_wakeup_noloss() if the value of
	 * r12[42:45] == WAKE_HMI.
	 */
	li	r12, 0
	b	pnv_wakeup_noloss

.Lhandle_esl_ec_set:
BEGIN_FTR_SECTION
	/*
	 * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up
	 * after a state-loss idle. Saving and restoring MMCR0 over idle is a
	 * workaround.
	 */
	mfspr	r4,SPRN_MMCR0
	std	r4,_MMCR0(r1)
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)

/*
 * Check if the requested state is a deep idle state.
 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	.Lhandle_deep_stop
	PPC_STOP	/* Does not return (system reset interrupt) */

.Lhandle_deep_stop:
/*
 * Entering deep idle state.
 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
 * stack and enter stop
 */
	lbz     r7,PACA_THREAD_MASK(r13)
	ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx   r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	andc    r15,r15,r7                      /* Clear thread bit */

	stwcx.  r15,0,r14
	bne-    lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	PPC_STOP	/* Does not return (system reset interrupt) */

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
_GLOBAL(power7_idle_insn)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common

#define CHECK_HMI_INTERRUPT						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r12,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne+	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
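
/*
 * The macro above, in rough C (a sketch):
 *
 *	reason = (srr1 >> 18) & 0xf;	// SRR1[42:45]; 3 bits on P7
 *	if (reason == 0xa) {		// hypervisor maintenance (HMI)
 *		saved_r3 = r3;
 *		hmi_exception_realmode(NULL);
 *		r3 = saved_r3;
 *	}
 */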

/*
 * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
 * r3 contains desired PSSCR register value.
 *
 * Offline (CPU unplug) case also must notify KVM that the CPU is
 * idle.
 */
_GLOBAL(power9_offline_stop)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* fall through */

_GLOBAL(power9_idle_stop)
	std	r3, PACA_REQ_PSSCR(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	sync
	lwz	r5, PACA_DONT_STOP(r13)
	cmpwi	r5, 0
	bne	1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
	mtspr	SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1:
	/*
	 * We get here when the TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li	r3, 0
	std	r3, PACA_REQ_PSSCR(r13)
	blr		/* return 0 for wakeup cause / SRR1 value */
#endif

/*
 * Called from machine check handler for powersave wakeups.
 * Low level machine check processing has already been done. Now just
 * go through the wake up path to get everything in order.
 *
 * r3 - The original SRR1 value.
 * Original SRR[01] have been clobbered.
 * MSR_RI is clear.
 */
.global pnv_powersave_wakeup_mce
pnv_powersave_wakeup_mce:
	/* Set cr3 for pnv_powersave_wakeup */
	rlwinm	r11,r3,47-31,30,31
	cmpwi	cr3,r11,2

	/*
	 * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
	 * reason into r12, which allows reuse of the system reset wakeup
	 * code without being mistaken for another type of wakeup.
	 */
	oris	r12,r3,SRR1_WAKEMCE_RESVD@h

	b	pnv_powersave_wakeup

/*
 * Called from reset vector for powersave wakeups.
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 * r12 - SRR1
 */
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
	ld	r2, PACATOC(r13)

BEGIN_FTR_SECTION
	bl	pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
	bl	pnv_restore_hyp_resource_arch207
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

	mr	r3,r12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
	beq	0f
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	blt	cr3,pnv_wakeup_noloss
	b	pnv_wakeup_loss

/*
 * Check whether we have woken up with hypervisor state loss.
 * If yes, restore hypervisor state and return to the link address.
 *
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
pnv_restore_hyp_resource_arch300:
	/*
	 * Workaround for POWER9: if we lost resources, the ERAT
	 * might have been mixed up and needs flushing. We also need
	 * to reload MMCR0 (see comment above), and to set then clear
	 * bit 60 in MMCRA to ensure the PMU starts running.
	 */
	blt	cr3,1f
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
	ld	r1,PACAR1(r13)
	ld	r4,_MMCR0(r1)
	mtspr	SPRN_MMCR0,r4
END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
	mfspr	r4,SPRN_MMCRA
	ori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
	xori	r4,r4,(1 << (63-60))
	mtspr	SPRN_MMCRA,r4
1:
	/*
	 * POWER ISA 3.0. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	mfspr	r5, SPRN_PSSCR
	rldicl  r5,r5,4,60
	li	r0, 0		/* clear requested_psscr to say we're awake */
	std	r0, PACA_REQ_PSSCR(r13)
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss /* returns to caller */

	blr	/* Waking up without hypervisor state loss. */
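
/*
 * The decision above, in rough C (a sketch):
 *
 *	pls = PSSCR >> 60;		// Power-Saving Level Status
 *	paca->requested_psscr = 0;	// we're awake
 *	if (pls >= pnv_first_deep_stop_state)
 *		pnv_wakeup_tb_loss();	// full restore; returns to caller
 *	// else: no hypervisor state loss, plain blr
 */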

/* Same calling convention as arch300 */
pnv_restore_hyp_resource_arch207:
	/*
	 * POWER ISA 2.07 or earlier.
	 * Check if we slept with sleep or winkle.
	 */
	lbz	r4,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r4,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage cr3 shouldn't contain 'gt', since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Waking up without hypervisor state loss */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hypervisor state loss.
 * On POWER8, called if waking up from fastsleep or winkle.
 * On POWER9, called if waking up from a stop state >= pnv_first_deep_stop_state.
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 *
 * If ISA300:
 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 *
 * If ISA207:
 * r4 - PACA_THREAD_IDLE_STATE
 */
pnv_wakeup_tb_loss:
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is
	 * lost, but not guaranteed -- neither the ISA207 nor the ISA300 test
	 * to reach here is the same as the test to restore NVGPRs:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return to the reset vector after hypervisor state
	 * restore is complete.
	 */
	mr	r19,r12
	mr	r18,r4
	mflr	r17
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
	lbz	r7,PACA_THREAD_MASK(r13)

	/*
	 * Take the core lock to synchronize against other threads.
	 *
	 * The lock bit is set in one of two cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case, loop until the lock bit is cleared.
	 */
1:
	lwarx	r15,0,r14
	andis.	r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
	bnel-	core_idle_lock_held
	oris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	stwcx.	r15,0,r14
	bne-	1b
	isync

	andi.	r9,r15,PNV_CORE_IDLE_THREAD_BITS
	cmpwi	cr2,r9,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * ISA300:
	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
	 */

BEGIN_FTR_SECTION
	/*
	 * Were we in winkle?
	 * If yes, check if all threads were in winkle, decrement our
	 * winkle count, set all thread winkle bits if all were in winkle.
	 * Check if our thread has a winkle bit set, and set cr4 accordingly
	 * (to match ISA300, above). Pseudo-code for core idle state
	 * transitions for ISA207 is as follows (everything happens atomically
	 * due to store conditional and/or lock bit):
	 *
	 * nap_idle() { }
	 * nap_wake() { }
	 *
	 * sleep_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core
	 * }
	 *
	 * sleep_wake()
	 * {
	 *     bool first_in_core, first_in_subcore;
	 *
	 *     first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *     first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *     core_idle_state |= thread_in_core;
	 * }
	 *
	 * winkle_idle()
	 * {
	 *	core_idle_state &= ~thread_in_core;
	 *	core_idle_state += 1 << WINKLE_COUNT_SHIFT;
	 * }
	 *
	 * winkle_wake()
	 * {
	 *     bool first_in_core, first_in_subcore, winkle_state_lost;
	 *
	 *     first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
	 *     first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
	 *
	 *     core_idle_state |= thread_in_core;
	 *
	 *     if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
	 *         core_idle_state |= THREAD_WINKLE_BITS;
	 *     core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
	 *
	 *     winkle_state_lost = core_idle_state &
	 *				(thread_in_core << WINKLE_THREAD_SHIFT);
	 *     core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
	 * }
	 *
	 */
	cmpwi	r18,PNV_THREAD_WINKLE
	bne	2f
	andis.	r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
	subis	r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
	beq	2f
	ori	r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
2:
	/*
	 * Shift thread bit to winkle mask, then test if this thread is set,
	 * and remove it from the winkle bits.
	 */
	slwi	r8,r7,8
	and	r8,r8,r15
	andc	r15,r15,r8
	cmpwi	cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */

	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0	/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore.
	 */
	blt	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld      r4,_SDR1(r1)
	mtspr   SPRN_SDR1,r4

	ld      r4,_RPR(r1)
	mtspr   SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching is performed during the
	 * discovery of idle states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if the timebase resync is needed.
	 */
	ble	cr3,.Ltb_resynced
	/* Time base re-sync */
	bl	opal_resync_timebase;
	/*
	 * If waking up from sleep (POWER8), per core state
	 * is not lost, skip to clear_lock.
	 */
.Ltb_resynced:
	blt	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per-core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	xoris	r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	blt	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
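
/*
 * The .rept block above, in rough C (a sketch; LDX_BE is a
 * byte-swapped load on little-endian hosts):
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		esid = be64_to_cpu(slb_shadow->save_area[i].esid);
 *		vsid = be64_to_cpu(slb_shadow->save_area[i].vsid);
 *		if (esid & SLB_ESID_V)
 *			slbmte(vsid, esid);	// reinstall bolted entry
 *	}
 */
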
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

/*
 * On POWER9, we can come here on wakeup from a cpuidle stop state.
 * Hence restore the additional SPRs to their saved values.
 *
 * On POWER8, we come here only on winkle. Since winkle is used
 * only in the case of CPU-Hotplug, we don't need to restore
 * the additional SPRs.
 */
BEGIN_FTR_SECTION
	bl	power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:

	mr	r12,r19
	mtlr	r17
	blr		/* return to pnv_powersave_wakeup */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
.global pnv_wakeup_loss
pnv_wakeup_loss:
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r4,PACAKMSR(r13)
	ld	r5,_LINK(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr
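
/*
 * Unwind sketch for the exit paths above and below: restore the
 * registers saved by pnv_powersave_common from the interrupt frame
 * (NVGPRs and r2 only on the "loss" path), pop the frame, restore
 * the kernel MSR, and return to the power7_idle_insn /
 * power9_idle_stop caller with r3 holding the wakeup reason.
 */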

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
 */
pnv_wakeup_noloss:
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r4,PACAKMSR(r13)
	ld	r5,_NIP(r1)
	ld	r6,_CCR(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtlr	r5
	mtcr	r6
	mtmsrd	r4
	blr
953