// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR      855

static u32 supported_cpuidle_states;
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by the ppc_md.power_save
 * function on platforms that support the stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * The first stop state levels at which SPR loss and TB loss can occur.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;

/*
 * PSSCR value and mask of the deepest stop idle state.
 * Used when a CPU is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

static unsigned long power7_offline_type;

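/*
 * Program the OPAL SLW (sleep/winkle) engine with the SPR values that
 * firmware should restore when a thread wakes from a deep, full
 * state-loss idle state. The values are read once on the current CPU
 * and assumed to be symmetric across all CPUs.
 */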
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these register values from the current cpu
	 * and use the same values across all cpus.
	 */
	uint64_t lpcr_val	= mfspr(SPRN_LPCR);
	uint64_t hid0_val	= mfspr(SPRN_HID0);
	uint64_t hmeer_val	= mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				uint64_t hid1_val = mfspr(SPRN_HID1);
				uint64_t hid4_val = mfspr(SPRN_HID4);
				uint64_t hid5_val = mfspr(SPRN_HID5);

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);

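/*
 * Callback run on each primary thread via on_each_cpu_mask(): ask OPAL
 * to apply the fastsleep workaround on this core and flag any failure
 * through the shared error pointer passed in 'info'.
 */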
static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err = 0;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in the 'applied' state on all
	 * the cores. Do this by:
	 * 1. Disabling the 'undo' workaround in the fastsleep exit path
	 * 2. Sending IPIs to all the cores which have at least one online thread
	 * 3. Disabling the 'apply' workaround in the fastsleep entry path
	 *
	 * There is no need to send an IPI to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
	power7_fastsleep_workaround_exit = false;

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
				pnv_fastsleep_workaround_apply,
				&err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	power7_fastsleep_workaround_entry = false;

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);

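/*
 * Each core tracks its idle threads in the idle_state word of the first
 * hardware thread's paca: a set thread bit means that thread is running,
 * a clear bit means it has entered (or is entering) a state-losing idle
 * state. PNV_CORE_IDLE_LOCK_BIT serializes updates to this word across
 * the threads of the core.
 */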
static inline void atomic_start_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	clear_bit(thread_nr, state);
}

static inline void atomic_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	set_bit(thread_nr, state);
}

static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
		barrier();
}

static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		s = tmp;
		goto again;
	}
}

static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}

/* P7 and P8 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

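/*
 * Enter nap/sleep/winkle ('type') on a POWER7/POWER8 thread and restore
 * any core, subcore and per-thread SPRs that the chosen state may have
 * lost. Runs in real mode; returns the SRR1 wakeup reason.
 */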
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			sprs.tscr	= mfspr(SPRN_TSCR);
			sprs.worc	= mfspr(SPRN_WORC);

			sprs.sdr1	= mfspr(SPRN_SDR1);
			sprs.rpr	= mfspr(SPRN_RPR);

			sprs.lpcr	= mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr	= mfspr(SPRN_HFSCR);
				sprs.fscr	= mfspr(SPRN_FSCR);
			}
			sprs.purr	= mfspr(SPRN_PURR);
			sprs.spurr	= mfspr(SPRN_SPURR);
			sprs.dscr	= mfspr(SPRN_DSCR);
			sprs.wort	= mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr	= mfspr(SPRN_AMR);
		sprs.iamr	= mfspr(SPRN_IAMR);
		sprs.amor	= mfspr(SPRN_AMOR);
		sprs.uamor	= mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR,		sprs.amr);
			mtspr(SPRN_IAMR,	sprs.iamr);
			mtspr(SPRN_AMOR,	sprs.amor);
			mtspr(SPRN_UAMOR,	sprs.uamor);
		}
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR,	sprs.tscr);
		mtspr(SPRN_WORC,	sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1,	sprs.sdr1);
	mtspr(SPRN_RPR,		sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR,	sprs.hfscr);
		mtspr(SPRN_FSCR,	sprs.fscr);
	}
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}

extern unsigned long idle_kvm_start_guest(unsigned long srr1);

#ifdef CONFIG_HOTPLUG_CPU
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif

void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}

static void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}

struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

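/*
 * Enter a stop state on POWER9 with the requested PSSCR value. For
 * state-losing (EC=ESL=1) requests this saves and restores the SPRs
 * that may be lost, and resyncs the timebase if necessary. Returns the
 * SRR1 wakeup reason; the MMU is turned back on before returning only
 * when 'mmu_on' is set.
 */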
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	unsigned long mmcra = 0;
	struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		BUG_ON(!mmu_on);

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * POWER9 DD2 can incorrectly set PMAO when waking up
		 * after a state-loss idle. Saving and restoring MMCR0
		 * over idle is a workaround.
		 */
		mmcr0		= mfspr(SPRN_MMCR0);
	}

	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		sprs.lpcr	= mfspr(SPRN_LPCR);
		sprs.hfscr	= mfspr(SPRN_HFSCR);
		sprs.fscr	= mfspr(SPRN_FSCR);
		sprs.pid	= mfspr(SPRN_PID);
		sprs.purr	= mfspr(SPRN_PURR);
		sprs.spurr	= mfspr(SPRN_SPURR);
		sprs.dscr	= mfspr(SPRN_DSCR);
		sprs.wort	= mfspr(SPRN_WORT);

		sprs.mmcra	= mfspr(SPRN_MMCRA);
		sprs.mmcr0	= mfspr(SPRN_MMCR0);
		sprs.mmcr1	= mfspr(SPRN_MMCR1);
		sprs.mmcr2	= mfspr(SPRN_MMCR2);

		sprs.ptcr	= mfspr(SPRN_PTCR);
		sprs.rpr	= mfspr(SPRN_RPR);
		sprs.tscr	= mfspr(SPRN_TSCR);
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	sprs.amr	= mfspr(SPRN_AMR);
	sprs.iamr	= mfspr(SPRN_IAMR);
	sprs.amor	= mfspr(SPRN_AMOR);
	sprs.uamor	= mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR,		sprs.amr);
		mtspr(SPRN_IAMR,	sprs.iamr);
		mtspr(SPRN_AMOR,	sprs.amor);
		mtspr(SPRN_UAMOR,	sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR,	sprs.ptcr);
	mtspr(SPRN_RPR,		sprs.rpr);
	mtspr(SPRN_TSCR,	sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	mtspr(SPRN_HFSCR,	sprs.hfscr);
	mtspr(SPRN_FSCR,	sprs.fscr);
	mtspr(SPRN_PID,		sprs.pid);
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_MMCRA,	sprs.mmcra);
	mtspr(SPRN_MMCR0,	sprs.mmcr0);
	mtspr(SPRN_MMCR1,	sprs.mmcr1);
	mtspr(SPRN_MMCR2,	sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	if (mmu_on)
		mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
	 * isa300 idle routines restore CR, LR.
	 * CTR is volatile
	 * idle thread doesn't use FP or VEC
	 * kernel doesn't use TAR
	 * HSPRG1 is only live in HV interrupt entry
	 * SPRG2 is only live in KVM guests, KVM handles it.
	 */
};

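/*
 * Enter a stop state on POWER10 with the requested PSSCR value. The deep
 * state SPR save/restore hooks are not implemented yet (see the XXX
 * markers below), so only states that do not lose full context are
 * expected here. Returns the SRR1 wakeup reason; the MMU is turned back
 * on before returning only when 'mmu_on' is set.
 */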
static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		BUG_ON(!mmu_on);

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	if (mmu_on)
		mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_HOTPLUG_CPU
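/*
 * Enter the requested stop state on behalf of an offlined CPU. When KVM
 * HV is possible, this also handles handing the hardware thread over to
 * a guest via idle_kvm_start_guest() on wakeup.
 */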
static unsigned long arch300_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, true);
	else
		srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();
#else
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 *
	 * kvm_start_guest must still be called in real mode though, hence
	 * the false argument.
	 */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, false);
	else
		srr1 = power9_idle_stop(psscr, false);
	__ppc64_runlatch_on();

	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
	mtmsr(MSR_KERNEL);
#endif

	return srr1;
}
#endif

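/*
 * Common stop entry point for cpuidle on ISA 3.0 and later CPUs: merge
 * the requested PSSCR value/mask into the current PSSCR, enter the stop
 * state, and replay any interrupt that woke us.
 */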
void arch300_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, true);
	else
		srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_HOTPLUG_CPU

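/*
 * Update the LPCR of an offline-bound CPU both directly and, if the
 * deepest stop state can lose full hypervisor context, via the stop-api
 * so that firmware restores the same value on wakeup.
 */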
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Called with interrupts hard-disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction was
 *	last executed.
 *
 *	Bit 41 - Status Disable(SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */

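/*
 * Sanity-check the PSSCR value/mask reported by firmware for an idle
 * state, fixing up the defaults for older firmware that only reports
 * the requested level. Returns 0 if the state is usable, or an ERR_*
 * code that report_invalid_psscr_val() can decode.
 */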
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}

/*
 * pnv_arch300_idle_init: Initializes the default idle state, first
 *                        deep idle state and deepest idle state on
 *                        ISA 3.0 CPUs, from the idle states already
 *                        parsed out of the device tree into
 *                        pnv_idle_states[].
 */
static void __init pnv_arch300_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/* stop is not really architected, we only have p9,p10 drivers */
	if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
		return;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	deep_spr_loss_state = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		/* No deep loss driver implemented for POWER10 yet */
		if (pvr_version_is(PVR_POWER10) &&
				state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
			continue;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = arch300_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		deep_spr_loss_state);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}

static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
		 */
		if (default_stop_found) {
			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
				pnv_deepest_stop_psscr_val);
		} else { /* Fallback to snooze loop for CPU-Hotplug */
			deepest_stop_found = false;
			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
		}
	}
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	int i;

	if (nr_pnv_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pnv_arch300_idle_init();

	for (i = 0; i < nr_pnv_idle_states; i++)
		supported_cpuidle_states |= pnv_idle_states[i].flags;
}

/*
 * This function parses the device tree and populates all the information
 * into the pnv_idle_states structure. It also sets up nr_pnv_idle_states,
 * the number of cpuidle states discovered through the device tree.
 */

static int pnv_parse_cpuidle_dt(void)
{
	struct device_node *np;
	int nr_idle_states, i;
	int rc = 0;
	u32 *temp_u32;
	u64 *temp_u64;
	const char **temp_string;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		return -ENODEV;
	}
	nr_idle_states = of_property_count_u32_elems(np,
						"ibm,cpu-idle-state-flags");

	pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
				  GFP_KERNEL);
	temp_u32 = kcalloc(nr_idle_states, sizeof(u32),  GFP_KERNEL);
	temp_u64 = kcalloc(nr_idle_states, sizeof(u64),  GFP_KERNEL);
	temp_string = kcalloc(nr_idle_states, sizeof(char *),  GFP_KERNEL);

	if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
		pr_err("Could not allocate memory for dt parsing\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Read flags */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].flags = temp_u32[i];

	/* Read latencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].latency_ns = temp_u32[i];

	/* Read residencies */
	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
				       temp_u32, nr_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		pnv_idle_states[i].residency_ns = temp_u32[i];

	/* For power9 and later */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* Read pm_crtl_val */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_val = temp_u64[i];

		/* Read pm_crtl_mask */
		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
					       temp_u64, nr_idle_states)) {
			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
			rc = -EINVAL;
			goto out;
		}
		for (i = 0; i < nr_idle_states; i++)
			pnv_idle_states[i].psscr_mask = temp_u64[i];
	}

	/*
	 * The power8 specific properties ibm,cpu-idle-state-pmicr-mask and
	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
	 * plan to use them in the near future. Hence, these properties are
	 * not parsed here.
	 */

	if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
					  temp_string, nr_idle_states) < 0) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
		rc = -EINVAL;
		goto out;
	}
	for (i = 0; i < nr_idle_states; i++)
		strlcpy(pnv_idle_states[i].name, temp_string[i],
			PNV_IDLE_NAME_LEN);
	nr_pnv_idle_states = nr_idle_states;
	rc = 0;
out:
	kfree(temp_u32);
	kfree(temp_u64);
	kfree(temp_string);
	return rc;
}

static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else if (pvr_version_is(PVR_POWER9)) {
			/* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * a workaround is needed to use fastsleep. Provide sysfs
			 * control to choose how this workaround has to be
			 * applied.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
			   (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			   (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);