12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2d405a98cSShreyas B. Prabhu /*
3d405a98cSShreyas B. Prabhu  * PowerNV cpuidle code
4d405a98cSShreyas B. Prabhu  *
5d405a98cSShreyas B. Prabhu  * Copyright 2015 IBM Corp.
6d405a98cSShreyas B. Prabhu  */
7d405a98cSShreyas B. Prabhu 
8d405a98cSShreyas B. Prabhu #include <linux/types.h>
9d405a98cSShreyas B. Prabhu #include <linux/mm.h>
10d405a98cSShreyas B. Prabhu #include <linux/slab.h>
11d405a98cSShreyas B. Prabhu #include <linux/of.h>
125703d2f4SShreyas B. Prabhu #include <linux/device.h>
135703d2f4SShreyas B. Prabhu #include <linux/cpu.h>
14d405a98cSShreyas B. Prabhu 
1510d91611SNicholas Piggin #include <asm/asm-prototypes.h>
16d405a98cSShreyas B. Prabhu #include <asm/firmware.h>
173a96570fSNicholas Piggin #include <asm/interrupt.h>
184bece972SMichael Ellerman #include <asm/machdep.h>
19d405a98cSShreyas B. Prabhu #include <asm/opal.h>
20d405a98cSShreyas B. Prabhu #include <asm/cputhreads.h>
21d405a98cSShreyas B. Prabhu #include <asm/cpuidle.h>
22d405a98cSShreyas B. Prabhu #include <asm/code-patching.h>
23d405a98cSShreyas B. Prabhu #include <asm/smp.h>
242201f994SNicholas Piggin #include <asm/runlatch.h>
257672691aSPaul Mackerras #include <asm/dbell.h>
26d405a98cSShreyas B. Prabhu 
27d405a98cSShreyas B. Prabhu #include "powernv.h"
28d405a98cSShreyas B. Prabhu #include "subcore.h"
29d405a98cSShreyas B. Prabhu 
/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

/*
 * SLW register numbers passed to opal_slw_set_reg() so that firmware
 * restores MSR and PSSCR on wakeup from deep stop (not real SPR numbers).
 */
#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR      855

/* Bitmask of idle-state capabilities supported by the platform. */
static u32 supported_cpuidle_states;
/* Idle state table discovered by platform code; entry count below. */
struct pnv_idle_states_t *pnv_idle_states;
int nr_pnv_idle_states;

/*
 * The default stop state that will be used by ppc_md.power_save
 * function on platforms that support stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First stop state levels when SPR and TB loss can occur.
 * Both default to MAX_STOP_STATE + 1, i.e. "never", until probed.
 */
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;

/*
 * psscr value and mask of the deepest stop idle state.
 * Used when a cpu is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

/* PNV_THREAD_* idle type used by power7_offline() for unplugged CPUs. */
static unsigned long power7_offline_type;
6410d91611SNicholas Piggin 
/*
 * Program the SLW restore engine (via OPAL) with the SPR values that
 * must be reinstated when a CPU wakes from a deep idle state that loses
 * hypervisor state.  Values are sampled once from the current CPU and
 * applied to every present CPU (symmetric at boot, see below).
 *
 * Returns 0 on success, or the first non-zero opal_slw_set_reg() rc.
 */
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val	= mfspr(SPRN_LPCR);
	uint64_t hid0_val	= mfspr(SPRN_HID0);
	uint64_t hmeer_val	= mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		/* HSPRG0 must point at this cpu's paca after wakeup. */
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* POWER9 (ISA 3.0): firmware also restores MSR and PSSCR. */
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				uint64_t hid1_val = mfspr(SPRN_HID1);
				uint64_t hid4_val = mfspr(SPRN_HID4);
				uint64_t hid5_val = mfspr(SPRN_HID5);

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}
139d405a98cSShreyas B. Prabhu 
140d405a98cSShreyas B. Prabhu u32 pnv_get_supported_cpuidle_states(void)
141d405a98cSShreyas B. Prabhu {
142d405a98cSShreyas B. Prabhu 	return supported_cpuidle_states;
143d405a98cSShreyas B. Prabhu }
144d405a98cSShreyas B. Prabhu EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
145d405a98cSShreyas B. Prabhu 
1465703d2f4SShreyas B. Prabhu static void pnv_fastsleep_workaround_apply(void *info)
1475703d2f4SShreyas B. Prabhu 
1485703d2f4SShreyas B. Prabhu {
1495703d2f4SShreyas B. Prabhu 	int rc;
1505703d2f4SShreyas B. Prabhu 	int *err = info;
1515703d2f4SShreyas B. Prabhu 
1525703d2f4SShreyas B. Prabhu 	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
1535703d2f4SShreyas B. Prabhu 					OPAL_CONFIG_IDLE_APPLY);
1545703d2f4SShreyas B. Prabhu 	if (rc)
1555703d2f4SShreyas B. Prabhu 		*err = 1;
1565703d2f4SShreyas B. Prabhu }
1575703d2f4SShreyas B. Prabhu 
/* Apply/undo the fastsleep workaround around each fastsleep entry/exit. */
static bool power7_fastsleep_workaround_entry = true;
static bool power7_fastsleep_workaround_exit = true;

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;
1675703d2f4SShreyas B. Prabhu 
/* sysfs show: report whether the fastsleep workaround is in apply-once mode. */
static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}
1735703d2f4SShreyas B. Prabhu 
1745703d2f4SShreyas B. Prabhu static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
1755703d2f4SShreyas B. Prabhu 		struct device_attribute *attr, const char *buf,
1765703d2f4SShreyas B. Prabhu 		size_t count)
1775703d2f4SShreyas B. Prabhu {
1785703d2f4SShreyas B. Prabhu 	cpumask_t primary_thread_mask;
1795703d2f4SShreyas B. Prabhu 	int err;
1805703d2f4SShreyas B. Prabhu 	u8 val;
1815703d2f4SShreyas B. Prabhu 
1825703d2f4SShreyas B. Prabhu 	if (kstrtou8(buf, 0, &val) || val != 1)
1835703d2f4SShreyas B. Prabhu 		return -EINVAL;
1845703d2f4SShreyas B. Prabhu 
1855703d2f4SShreyas B. Prabhu 	if (fastsleep_workaround_applyonce == 1)
1865703d2f4SShreyas B. Prabhu 		return count;
1875703d2f4SShreyas B. Prabhu 
1885703d2f4SShreyas B. Prabhu 	/*
1895703d2f4SShreyas B. Prabhu 	 * fastsleep_workaround_applyonce = 1 implies
1905703d2f4SShreyas B. Prabhu 	 * fastsleep workaround needs to be left in 'applied' state on all
1915703d2f4SShreyas B. Prabhu 	 * the cores. Do this by-
19210d91611SNicholas Piggin 	 * 1. Disable the 'undo' workaround in fastsleep exit path
19310d91611SNicholas Piggin 	 * 2. Sendi IPIs to all the cores which have at least one online thread
19410d91611SNicholas Piggin 	 * 3. Disable the 'apply' workaround in fastsleep entry path
19510d91611SNicholas Piggin 	 *
1965703d2f4SShreyas B. Prabhu 	 * There is no need to send ipi to cores which have all threads
1975703d2f4SShreyas B. Prabhu 	 * offlined, as last thread of the core entering fastsleep or deeper
1985703d2f4SShreyas B. Prabhu 	 * state would have applied workaround.
1995703d2f4SShreyas B. Prabhu 	 */
20010d91611SNicholas Piggin 	power7_fastsleep_workaround_exit = false;
2015703d2f4SShreyas B. Prabhu 
2025703d2f4SShreyas B. Prabhu 	get_online_cpus();
2035703d2f4SShreyas B. Prabhu 	primary_thread_mask = cpu_online_cores_map();
2045703d2f4SShreyas B. Prabhu 	on_each_cpu_mask(&primary_thread_mask,
2055703d2f4SShreyas B. Prabhu 				pnv_fastsleep_workaround_apply,
2065703d2f4SShreyas B. Prabhu 				&err, 1);
2075703d2f4SShreyas B. Prabhu 	put_online_cpus();
2085703d2f4SShreyas B. Prabhu 	if (err) {
2095703d2f4SShreyas B. Prabhu 		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
2105703d2f4SShreyas B. Prabhu 		goto fail;
2115703d2f4SShreyas B. Prabhu 	}
2125703d2f4SShreyas B. Prabhu 
21310d91611SNicholas Piggin 	power7_fastsleep_workaround_entry = false;
2145703d2f4SShreyas B. Prabhu 
2155703d2f4SShreyas B. Prabhu 	fastsleep_workaround_applyonce = 1;
2165703d2f4SShreyas B. Prabhu 
2175703d2f4SShreyas B. Prabhu 	return count;
2185703d2f4SShreyas B. Prabhu fail:
2195703d2f4SShreyas B. Prabhu 	return -EIO;
2205703d2f4SShreyas B. Prabhu }
2215703d2f4SShreyas B. Prabhu 
/* 0600: root-only read/write sysfs attribute. */
static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
2255703d2f4SShreyas B. Prabhu 
22610d91611SNicholas Piggin static inline void atomic_start_thread_idle(void)
2272201f994SNicholas Piggin {
22810d91611SNicholas Piggin 	int cpu = raw_smp_processor_id();
22910d91611SNicholas Piggin 	int first = cpu_first_thread_sibling(cpu);
23010d91611SNicholas Piggin 	int thread_nr = cpu_thread_in_core(cpu);
23110d91611SNicholas Piggin 	unsigned long *state = &paca_ptrs[first]->idle_state;
23210d91611SNicholas Piggin 
23310d91611SNicholas Piggin 	clear_bit(thread_nr, state);
23410d91611SNicholas Piggin }
23510d91611SNicholas Piggin 
23610d91611SNicholas Piggin static inline void atomic_stop_thread_idle(void)
23710d91611SNicholas Piggin {
23810d91611SNicholas Piggin 	int cpu = raw_smp_processor_id();
23910d91611SNicholas Piggin 	int first = cpu_first_thread_sibling(cpu);
24010d91611SNicholas Piggin 	int thread_nr = cpu_thread_in_core(cpu);
24110d91611SNicholas Piggin 	unsigned long *state = &paca_ptrs[first]->idle_state;
24210d91611SNicholas Piggin 
24310d91611SNicholas Piggin 	set_bit(thread_nr, state);
24410d91611SNicholas Piggin }
24510d91611SNicholas Piggin 
24610d91611SNicholas Piggin static inline void atomic_lock_thread_idle(void)
24710d91611SNicholas Piggin {
24810d91611SNicholas Piggin 	int cpu = raw_smp_processor_id();
24910d91611SNicholas Piggin 	int first = cpu_first_thread_sibling(cpu);
25010d91611SNicholas Piggin 	unsigned long *state = &paca_ptrs[first]->idle_state;
25110d91611SNicholas Piggin 
25210d91611SNicholas Piggin 	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
25310d91611SNicholas Piggin 		barrier();
25410d91611SNicholas Piggin }
25510d91611SNicholas Piggin 
25610d91611SNicholas Piggin static inline void atomic_unlock_and_stop_thread_idle(void)
25710d91611SNicholas Piggin {
25810d91611SNicholas Piggin 	int cpu = raw_smp_processor_id();
25910d91611SNicholas Piggin 	int first = cpu_first_thread_sibling(cpu);
26010d91611SNicholas Piggin 	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
26110d91611SNicholas Piggin 	unsigned long *state = &paca_ptrs[first]->idle_state;
26210d91611SNicholas Piggin 	u64 s = READ_ONCE(*state);
26310d91611SNicholas Piggin 	u64 new, tmp;
26410d91611SNicholas Piggin 
26510d91611SNicholas Piggin 	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
26610d91611SNicholas Piggin 	BUG_ON(s & thread);
26710d91611SNicholas Piggin 
26810d91611SNicholas Piggin again:
26910d91611SNicholas Piggin 	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
27010d91611SNicholas Piggin 	tmp = cmpxchg(state, s, new);
27110d91611SNicholas Piggin 	if (unlikely(tmp != s)) {
27210d91611SNicholas Piggin 		s = tmp;
27310d91611SNicholas Piggin 		goto again;
27410d91611SNicholas Piggin 	}
27510d91611SNicholas Piggin }
27610d91611SNicholas Piggin 
27710d91611SNicholas Piggin static inline void atomic_unlock_thread_idle(void)
27810d91611SNicholas Piggin {
27910d91611SNicholas Piggin 	int cpu = raw_smp_processor_id();
28010d91611SNicholas Piggin 	int first = cpu_first_thread_sibling(cpu);
28110d91611SNicholas Piggin 	unsigned long *state = &paca_ptrs[first]->idle_state;
28210d91611SNicholas Piggin 
28310d91611SNicholas Piggin 	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
28410d91611SNicholas Piggin 	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
28510d91611SNicholas Piggin }
28610d91611SNicholas Piggin 
28710d91611SNicholas Piggin /* P7 and P8 */
/*
 * P7 and P8: SPRs saved before winkle and restored on wakeup, grouped
 * by the hardware scope at which one waking thread restores them.
 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
31210d91611SNicholas Piggin 
/*
 * Execute a P7/P8 idle instruction of the given PNV_THREAD_* type and
 * perform the per-core accounting and SPR save/restore required for
 * states deeper than nap.  Callers run this with the MMU off (MSR_IDLE)
 * and interrupts disabled.  Returns the SRR1 wakeup value.
 */
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	/* Nap skips the per-core accounting; deeper states need it. */
	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		/* Last thread down applies the fastsleep workaround. */
		if (power7_fastsleep_workaround_entry) {
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			/* Save the SPRs that winkle can lose. */
			sprs.tscr	= mfspr(SPRN_TSCR);
			sprs.worc	= mfspr(SPRN_WORC);

			sprs.sdr1	= mfspr(SPRN_SDR1);
			sprs.rpr	= mfspr(SPRN_RPR);

			sprs.lpcr	= mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr	= mfspr(SPRN_HFSCR);
				sprs.fscr	= mfspr(SPRN_FSCR);
			}
			sprs.purr	= mfspr(SPRN_PURR);
			sprs.spurr	= mfspr(SPRN_SPURR);
			sprs.dscr	= mfspr(SPRN_DSCR);
			sprs.wort	= mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	/* These per-thread SPRs can be lost even in shallow states. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr	= mfspr(SPRN_AMR);
		sprs.iamr	= mfspr(SPRN_IAMR);
		sprs.amor	= mfspr(SPRN_AMOR);
		sprs.uamor	= mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	/* Wakeup must still be in real mode (MMU off). */
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR,		sprs.amr);
			mtspr(SPRN_IAMR,	sprs.iamr);
			mtspr(SPRN_AMOR,	sprs.amor);
			mtspr(SPRN_UAMOR,	sprs.uamor);
		}
	}

	/* Hypervisor Maintenance Interrupt wakeup: handle in real mode. */
	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/* No hypervisor state loss: just undo the accounting and return. */
	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	/*
	 * Our winkle bit still set means every thread winkled, i.e. the
	 * core fully winkled and the saved SPRs must be restored.
	 */
	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	/* Another thread already woke and restored the core resources. */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR,	sprs.tscr);
		mtspr(SPRN_WORC,	sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1,	sprs.sdr1);
	mtspr(SPRN_RPR,		sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR,	sprs.hfscr);
		mtspr(SPRN_FSCR,	sprs.fscr);
	}
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}
5042201f994SNicholas Piggin 
50510d91611SNicholas Piggin extern unsigned long idle_kvm_start_guest(unsigned long srr1);
50610d91611SNicholas Piggin 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Park an offlined P7/P8 CPU in the deepest configured idle state
 * (power7_offline_type).  Returns the SRR1 wakeup value; may divert
 * into KVM guest entry if a guest requested this hardware thread.
 */
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	/* Enter real mode before touching the KVM hwthread state below. */
	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif
55110d91611SNicholas Piggin 
/*
 * Enter a P7/P8 idle state of the given PNV_THREAD_* type from the
 * cpuidle path.  Interrupts are prepared/finalized around the idle
 * instruction, and the interrupt that caused the wakeup (encoded in
 * SRR1) is replayed via irq_set_pending_from_srr1().
 */
void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	/* Bail if an interrupt is already pending; idle would wake at once. */
	if (!prep_irq_for_idle_irqsoff())
		return;

	/* Idle executes with the MMU off; restore MSR_KERNEL on return. */
	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}
5682201f994SNicholas Piggin 
569ffd2961bSNicholas Piggin static void power7_idle(void)
5702201f994SNicholas Piggin {
5712201f994SNicholas Piggin 	if (!powersave_nap)
5722201f994SNicholas Piggin 		return;
5732201f994SNicholas Piggin 
5742201f994SNicholas Piggin 	power7_idle_type(PNV_THREAD_NAP);
5752201f994SNicholas Piggin }
5762201f994SNicholas Piggin 
/*
 * P9: SPRs saved before a state-losing stop and restored on wakeup,
 * grouped by the hardware scope at which one waking thread restores
 * them.
 */
struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;
	u64 ciabr;

	/* performance monitor state */
	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
60610d91611SNicholas Piggin 
/*
 * power9_idle_stop - execute the stop instruction on POWER9 and handle
 *                    the wakeup.
 * @psscr: requested PSSCR value (EC/ESL/RL fields composed by the caller).
 *
 * Returns the SRR1 wakeup value, or 0 for the EC=ESL=0 synchronous-wake
 * case where there is no interrupt to replay.
 *
 * For EC=ESL=1 states that can lose state, SPRs are saved before idling
 * and restored afterwards: per-thread SPRs by every thread, per-core
 * SPRs only by the first thread of the core to wake, serialised via the
 * paca idle_state word and the atomic_*_thread_idle() helpers.
 */
static unsigned long power9_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	unsigned long mmcra = 0;
	struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		 /*
		  * POWER9 DD2 can incorrectly set PMAO when waking up
		  * after a state-loss idle. Saving and restoring MMCR0
		  * over idle is a workaround.
		  */
		mmcr0		= mfspr(SPRN_MMCR0);
	}

	/* Deep states may lose SPRs: save them and join the core idle protocol */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		sprs.lpcr	= mfspr(SPRN_LPCR);
		sprs.hfscr	= mfspr(SPRN_HFSCR);
		sprs.fscr	= mfspr(SPRN_FSCR);
		sprs.pid	= mfspr(SPRN_PID);
		sprs.purr	= mfspr(SPRN_PURR);
		sprs.spurr	= mfspr(SPRN_SPURR);
		sprs.dscr	= mfspr(SPRN_DSCR);
		sprs.wort	= mfspr(SPRN_WORT);
		sprs.ciabr	= mfspr(SPRN_CIABR);

		sprs.mmcra	= mfspr(SPRN_MMCRA);
		sprs.mmcr0	= mfspr(SPRN_MMCR0);
		sprs.mmcr1	= mfspr(SPRN_MMCR1);
		sprs.mmcr2	= mfspr(SPRN_MMCR2);

		sprs.ptcr	= mfspr(SPRN_PTCR);
		sprs.rpr	= mfspr(SPRN_RPR);
		sprs.tscr	= mfspr(SPRN_TSCR);
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	/* These can be lost even in shallow states; always save them */
	sprs.amr	= mfspr(SPRN_AMR);
	sprs.iamr	= mfspr(SPRN_IAMR);
	sprs.amor	= mfspr(SPRN_AMOR);
	sprs.uamor	= mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	/* Re-read PSSCR: PLS now reports how deep the thread actually went */
	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR,		sprs.amr);
		mtspr(SPRN_IAMR,	sprs.iamr);
		mtspr(SPRN_AMOR,	sprs.amor);
		mtspr(SPRN_UAMOR,	sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* Another thread already woke and restored the per-core SPRs */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR,	sprs.ptcr);
	mtspr(SPRN_RPR,		sprs.rpr);
	mtspr(SPRN_TSCR,	sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	mtspr(SPRN_HFSCR,	sprs.hfscr);
	mtspr(SPRN_FSCR,	sprs.fscr);
	mtspr(SPRN_PID,		sprs.pid);
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);
	mtspr(SPRN_CIABR,	sprs.ciabr);

	mtspr(SPRN_MMCRA,	sprs.mmcra);
	mtspr(SPRN_MMCR0,	sprs.mmcr0);
	mtspr(SPRN_MMCR1,	sprs.mmcr1);
	mtspr(SPRN_MMCR2,	sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}
80810d91611SNicholas Piggin 
8097672691aSPaul Mackerras #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 in order by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);	/* first thread of this core */
	/* Ask the siblings not to enter (or stay in) stop */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	/* Count threads not requesting stop; remember the rest to poke */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
8647672691aSPaul Mackerras 
8657672691aSPaul Mackerras void pnv_power9_force_smt4_release(void)
8667672691aSPaul Mackerras {
8677672691aSPaul Mackerras 	int cpu, cpu0, thr;
8687672691aSPaul Mackerras 
8697672691aSPaul Mackerras 	cpu = smp_processor_id();
8707672691aSPaul Mackerras 	cpu0 = cpu & ~(threads_per_core - 1);
8717672691aSPaul Mackerras 
8727672691aSPaul Mackerras 	/* clear all the dont_stop flags */
8737672691aSPaul Mackerras 	for (thr = 0; thr < threads_per_core; ++thr) {
8747672691aSPaul Mackerras 		if (cpu != cpu0 + thr)
875f437c517SMichael Ellerman 			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
8767672691aSPaul Mackerras 	}
8777672691aSPaul Mackerras }
8787672691aSPaul Mackerras EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
8797672691aSPaul Mackerras #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
8807672691aSPaul Mackerras 
/*
 * Placeholder for SPRs that would need saving across stop on POWER10;
 * currently empty because nothing extra needs software save/restore.
 */
struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
	 * isa300 idle routines restore CR, LR.
	 * CTR is volatile
	 * idle thread doesn't use FP or VEC
	 * kernel doesn't use TAR
	 * HSPRG1 is only live in HV interrupt entry
	 * SPRG2 is only live in KVM guests, KVM handles it.
	 */
};
894ffd2961bSNicholas Piggin 
/*
 * power10_idle_stop - execute the stop instruction on POWER10 and handle
 *                     the wakeup.
 * @psscr: requested PSSCR value (EC/ESL/RL fields composed by the caller).
 *
 * Returns the SRR1 wakeup value, or 0 for the EC=ESL=0 synchronous-wake
 * case. Deep SPR-loss save/restore is not implemented yet (see the XXX
 * markers); pnv_arch300_idle_init skips such states on POWER10.
 */
static unsigned long power10_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

	/* Re-read PSSCR: PLS now reports how deep the thread actually went */
	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* Another thread already woke and restored the per-core state */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}
993ffd2961bSNicholas Piggin 
994ffd2961bSNicholas Piggin #ifdef CONFIG_HOTPLUG_CPU
995ffd2961bSNicholas Piggin static unsigned long arch300_offline_stop(unsigned long psscr)
996ffd2961bSNicholas Piggin {
997ffd2961bSNicholas Piggin 	unsigned long srr1;
998ffd2961bSNicholas Piggin 
999ffd2961bSNicholas Piggin 	if (cpu_has_feature(CPU_FTR_ARCH_31))
1000*fae5c9f3SNicholas Piggin 		srr1 = power10_idle_stop(psscr);
1001ffd2961bSNicholas Piggin 	else
1002*fae5c9f3SNicholas Piggin 		srr1 = power9_idle_stop(psscr);
1003ffd2961bSNicholas Piggin 
1004ffd2961bSNicholas Piggin 	return srr1;
1005ffd2961bSNicholas Piggin }
1006ffd2961bSNicholas Piggin #endif
1007ffd2961bSNicholas Piggin 
1008ffd2961bSNicholas Piggin void arch300_idle_type(unsigned long stop_psscr_val,
1009ffd2961bSNicholas Piggin 				      unsigned long stop_psscr_mask)
1010ffd2961bSNicholas Piggin {
1011ffd2961bSNicholas Piggin 	unsigned long psscr;
1012ffd2961bSNicholas Piggin 	unsigned long srr1;
1013ffd2961bSNicholas Piggin 
1014ffd2961bSNicholas Piggin 	if (!prep_irq_for_idle_irqsoff())
1015ffd2961bSNicholas Piggin 		return;
1016ffd2961bSNicholas Piggin 
1017ffd2961bSNicholas Piggin 	psscr = mfspr(SPRN_PSSCR);
1018ffd2961bSNicholas Piggin 	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
1019ffd2961bSNicholas Piggin 
1020ffd2961bSNicholas Piggin 	__ppc64_runlatch_off();
1021ffd2961bSNicholas Piggin 	if (cpu_has_feature(CPU_FTR_ARCH_31))
1022*fae5c9f3SNicholas Piggin 		srr1 = power10_idle_stop(psscr);
1023ffd2961bSNicholas Piggin 	else
1024*fae5c9f3SNicholas Piggin 		srr1 = power9_idle_stop(psscr);
1025ffd2961bSNicholas Piggin 	__ppc64_runlatch_on();
1026ffd2961bSNicholas Piggin 
1027ffd2961bSNicholas Piggin 	fini_irq_for_idle_irqsoff();
1028ffd2961bSNicholas Piggin 
1029ffd2961bSNicholas Piggin 	irq_set_pending_from_srr1(srr1);
1030ffd2961bSNicholas Piggin }
1031ffd2961bSNicholas Piggin 
/*
 * Used for ppc_md.power_save which needs a function with no parameters.
 * Enters the platform default stop state (pnv_default_stop_val/mask).
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}
1039ffd2961bSNicholas Piggin 
104067d20418SNicholas Piggin #ifdef CONFIG_HOTPLUG_CPU
104119f8a5b5SPaul Mackerras 
104219f8a5b5SPaul Mackerras void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
104324be85a2SGautham R. Shenoy {
104424be85a2SGautham R. Shenoy 	u64 pir = get_hard_smp_processor_id(cpu);
104524be85a2SGautham R. Shenoy 
104624be85a2SGautham R. Shenoy 	mtspr(SPRN_LPCR, lpcr_val);
10475d298baaSGautham R. Shenoy 
10485d298baaSGautham R. Shenoy 	/*
10495d298baaSGautham R. Shenoy 	 * Program the LPCR via stop-api only if the deepest stop state
10505d298baaSGautham R. Shenoy 	 * can lose hypervisor context.
10515d298baaSGautham R. Shenoy 	 */
10525d298baaSGautham R. Shenoy 	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
105324be85a2SGautham R. Shenoy 		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
105424be85a2SGautham R. Shenoy }
105524be85a2SGautham R. Shenoy 
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Caller must have interrupts hard disabled and no lazy irq pending.
 * Returns the SRR1 wakeup value (0 for the emulated-snooze fallback,
 * which has no wakeup interrupt to report).
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		/* ISA 3.0: request the deepest stop state found at init */
		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
109067d20418SNicholas Piggin #endif
1091a7cd88daSGautham R. Shenoy 
1092a7cd88daSGautham R. Shenoy /*
1093bcef83a0SShreyas B. Prabhu  * Power ISA 3.0 idle initialization.
1094bcef83a0SShreyas B. Prabhu  *
1095bcef83a0SShreyas B. Prabhu  * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
1096bcef83a0SShreyas B. Prabhu  * Register (PSSCR) to control idle behavior.
1097bcef83a0SShreyas B. Prabhu  *
1098bcef83a0SShreyas B. Prabhu  * PSSCR layout:
1099bcef83a0SShreyas B. Prabhu  * ----------------------------------------------------------
1100bcef83a0SShreyas B. Prabhu  * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
1101bcef83a0SShreyas B. Prabhu  * ----------------------------------------------------------
1102bcef83a0SShreyas B. Prabhu  * 0      4     41   42    43   44     48    54   56    60
1103bcef83a0SShreyas B. Prabhu  *
1104bcef83a0SShreyas B. Prabhu  * PSSCR key fields:
1105bcef83a0SShreyas B. Prabhu  *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
1106bcef83a0SShreyas B. Prabhu  *	lowest power-saving state the thread entered since stop instruction was
1107bcef83a0SShreyas B. Prabhu  *	last executed.
1108bcef83a0SShreyas B. Prabhu  *
1109bcef83a0SShreyas B. Prabhu  *	Bit 41 - Status Disable(SD)
1110bcef83a0SShreyas B. Prabhu  *	0 - Shows PLS entries
1111bcef83a0SShreyas B. Prabhu  *	1 - PLS entries are all 0
1112bcef83a0SShreyas B. Prabhu  *
1113bcef83a0SShreyas B. Prabhu  *	Bit 42 - Enable State Loss
1114bcef83a0SShreyas B. Prabhu  *	0 - No state is lost irrespective of other fields
1115bcef83a0SShreyas B. Prabhu  *	1 - Allows state loss
1116bcef83a0SShreyas B. Prabhu  *
1117bcef83a0SShreyas B. Prabhu  *	Bit 43 - Exit Criterion
1118bcef83a0SShreyas B. Prabhu  *	0 - Exit from power-save mode on any interrupt
1119bcef83a0SShreyas B. Prabhu  *	1 - Exit from power-save mode controlled by LPCR's PECE bits
1120bcef83a0SShreyas B. Prabhu  *
1121bcef83a0SShreyas B. Prabhu  *	Bits 44:47 - Power-Saving Level Limit
1122bcef83a0SShreyas B. Prabhu  *	This limits the power-saving level that can be entered into.
1123bcef83a0SShreyas B. Prabhu  *
1124bcef83a0SShreyas B. Prabhu  *	Bits 60:63 - Requested Level
1125bcef83a0SShreyas B. Prabhu  *	Used to specify which power-saving level must be entered on executing
1126bcef83a0SShreyas B. Prabhu  *	stop instruction
112709206b60SGautham R. Shenoy  */
112809206b60SGautham R. Shenoy 
112909206b60SGautham R. Shenoy int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
113009206b60SGautham R. Shenoy {
113109206b60SGautham R. Shenoy 	int err = 0;
113209206b60SGautham R. Shenoy 
113309206b60SGautham R. Shenoy 	/*
113409206b60SGautham R. Shenoy 	 * psscr_mask == 0xf indicates an older firmware.
113509206b60SGautham R. Shenoy 	 * Set remaining fields of psscr to the default values.
113609206b60SGautham R. Shenoy 	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
113709206b60SGautham R. Shenoy 	 */
113809206b60SGautham R. Shenoy 	if (*psscr_mask == 0xf) {
113909206b60SGautham R. Shenoy 		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
114009206b60SGautham R. Shenoy 		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
114109206b60SGautham R. Shenoy 		return err;
114209206b60SGautham R. Shenoy 	}
114309206b60SGautham R. Shenoy 
114409206b60SGautham R. Shenoy 	/*
114509206b60SGautham R. Shenoy 	 * New firmware is expected to set the psscr_val bits correctly.
114609206b60SGautham R. Shenoy 	 * Validate that the following invariants are correctly maintained by
114709206b60SGautham R. Shenoy 	 * the new firmware.
114809206b60SGautham R. Shenoy 	 * - ESL bit value matches the EC bit value.
114909206b60SGautham R. Shenoy 	 * - ESL bit is set for all the deep stop states.
115009206b60SGautham R. Shenoy 	 */
115109206b60SGautham R. Shenoy 	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
115209206b60SGautham R. Shenoy 		err = ERR_EC_ESL_MISMATCH;
115309206b60SGautham R. Shenoy 	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
115409206b60SGautham R. Shenoy 		GET_PSSCR_ESL(*psscr_val) == 0) {
115509206b60SGautham R. Shenoy 		err = ERR_DEEP_STATE_ESL_MISMATCH;
115609206b60SGautham R. Shenoy 	}
115709206b60SGautham R. Shenoy 
115809206b60SGautham R. Shenoy 	return err;
115909206b60SGautham R. Shenoy }
116009206b60SGautham R. Shenoy 
116109206b60SGautham R. Shenoy /*
116209206b60SGautham R. Shenoy  * pnv_arch300_idle_init: Initializes the default idle state, first
116309206b60SGautham R. Shenoy  *                        deep idle state and deepest idle state on
116409206b60SGautham R. Shenoy  *                        ISA 3.0 CPUs.
1165bcef83a0SShreyas B. Prabhu  *
 * The function takes no parameters; it walks the pnv_idle_states table
 * (nr_pnv_idle_states entries, parsed from the /ibm,opal/power-mgt
 * device node) and records the first SPR-loss and timebase-loss stop
 * levels. No return value.
1170bcef83a0SShreyas B. Prabhu  */
static void __init pnv_arch300_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/* stop is not really architected, we only have p9,p10 drivers */
	if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
		return;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	/* Start above the valid RL range so any real state compares shallower */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	deep_spr_loss_state = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		/* PSSCR RL (requested level) encodes how deep this state is */
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		/* No deep loss driver implemented for POWER10 yet */
		if (pvr_version_is(PVR_POWER10) &&
				state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
			continue;

		/* Track the shallowest level that may stop the timebase */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		/* Track the shallowest level that may lose full context */
		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			/* Bad PSSCR value/mask: report and skip this state */
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		/* Deepest stop state: the one with the longest residency */
		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		/* Default power_save state: first loss-less state found */
		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			/* Default must not lose context: STOP_INST_FAST implies loss-less */
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = arch300_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		deep_spr_loss_state);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}
126810d91611SNicholas Piggin 
126910d91611SNicholas Piggin static void __init pnv_disable_deep_states(void)
127010d91611SNicholas Piggin {
127110d91611SNicholas Piggin 	/*
127210d91611SNicholas Piggin 	 * The stop-api is unable to restore hypervisor
127310d91611SNicholas Piggin 	 * resources on wakeup from platform idle states which
127410d91611SNicholas Piggin 	 * lose full context. So disable such states.
127510d91611SNicholas Piggin 	 */
127610d91611SNicholas Piggin 	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
127710d91611SNicholas Piggin 	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
127810d91611SNicholas Piggin 	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
127910d91611SNicholas Piggin 
128010d91611SNicholas Piggin 	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
128110d91611SNicholas Piggin 	    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
128210d91611SNicholas Piggin 		/*
128310d91611SNicholas Piggin 		 * Use the default stop state for CPU-Hotplug
128410d91611SNicholas Piggin 		 * if available.
128510d91611SNicholas Piggin 		 */
128610d91611SNicholas Piggin 		if (default_stop_found) {
128710d91611SNicholas Piggin 			pnv_deepest_stop_psscr_val = pnv_default_stop_val;
128810d91611SNicholas Piggin 			pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
128910d91611SNicholas Piggin 			pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
129010d91611SNicholas Piggin 				pnv_deepest_stop_psscr_val);
129110d91611SNicholas Piggin 		} else { /* Fallback to snooze loop for CPU-Hotplug */
129210d91611SNicholas Piggin 			deepest_stop_found = false;
129310d91611SNicholas Piggin 			pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
129410d91611SNicholas Piggin 		}
129510d91611SNicholas Piggin 	}
1296bcef83a0SShreyas B. Prabhu }
1297bcef83a0SShreyas B. Prabhu 
1298bcef83a0SShreyas B. Prabhu /*
1299bcef83a0SShreyas B. Prabhu  * Probe device tree for supported idle states
1300bcef83a0SShreyas B. Prabhu  */
1301bcef83a0SShreyas B. Prabhu static void __init pnv_probe_idle_states(void)
1302bcef83a0SShreyas B. Prabhu {
1303d405a98cSShreyas B. Prabhu 	int i;
1304d405a98cSShreyas B. Prabhu 
13059c7b185aSAkshay Adiga 	if (nr_pnv_idle_states < 0) {
13069c7b185aSAkshay Adiga 		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
13079c7b185aSAkshay Adiga 		return;
13089c7b185aSAkshay Adiga 	}
13099c7b185aSAkshay Adiga 
131016d83a54SPratik Rajesh Sampat 	if (cpu_has_feature(CPU_FTR_ARCH_300))
1311ffd2961bSNicholas Piggin 		pnv_arch300_idle_init();
13129c7b185aSAkshay Adiga 
13139c7b185aSAkshay Adiga 	for (i = 0; i < nr_pnv_idle_states; i++)
13149c7b185aSAkshay Adiga 		supported_cpuidle_states |= pnv_idle_states[i].flags;
13159c7b185aSAkshay Adiga }
13169c7b185aSAkshay Adiga 
13179c7b185aSAkshay Adiga /*
13189c7b185aSAkshay Adiga  * This function parses device-tree and populates all the information
13199c7b185aSAkshay Adiga  * into pnv_idle_states structure. It also sets up nr_pnv_idle_states
13209c7b185aSAkshay Adiga  * which is the number of cpuidle states discovered through device-tree.
13219c7b185aSAkshay Adiga  */
13229c7b185aSAkshay Adiga 
13239c7b185aSAkshay Adiga static int pnv_parse_cpuidle_dt(void)
13249c7b185aSAkshay Adiga {
13259c7b185aSAkshay Adiga 	struct device_node *np;
13269c7b185aSAkshay Adiga 	int nr_idle_states, i;
13279c7b185aSAkshay Adiga 	int rc = 0;
13289c7b185aSAkshay Adiga 	u32 *temp_u32;
13299c7b185aSAkshay Adiga 	u64 *temp_u64;
13309c7b185aSAkshay Adiga 	const char **temp_string;
13319c7b185aSAkshay Adiga 
1332bcef83a0SShreyas B. Prabhu 	np = of_find_node_by_path("/ibm,opal/power-mgt");
1333bcef83a0SShreyas B. Prabhu 	if (!np) {
1334d405a98cSShreyas B. Prabhu 		pr_warn("opal: PowerMgmt Node not found\n");
13359c7b185aSAkshay Adiga 		return -ENODEV;
1336d405a98cSShreyas B. Prabhu 	}
13379c7b185aSAkshay Adiga 	nr_idle_states = of_property_count_u32_elems(np,
1338d405a98cSShreyas B. Prabhu 						"ibm,cpu-idle-state-flags");
13399c7b185aSAkshay Adiga 
13409c7b185aSAkshay Adiga 	pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
13419c7b185aSAkshay Adiga 				  GFP_KERNEL);
13429c7b185aSAkshay Adiga 	temp_u32 = kcalloc(nr_idle_states, sizeof(u32),  GFP_KERNEL);
13439c7b185aSAkshay Adiga 	temp_u64 = kcalloc(nr_idle_states, sizeof(u64),  GFP_KERNEL);
13449c7b185aSAkshay Adiga 	temp_string = kcalloc(nr_idle_states, sizeof(char *),  GFP_KERNEL);
13459c7b185aSAkshay Adiga 
13469c7b185aSAkshay Adiga 	if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
13479c7b185aSAkshay Adiga 		pr_err("Could not allocate memory for dt parsing\n");
13489c7b185aSAkshay Adiga 		rc = -ENOMEM;
1349d405a98cSShreyas B. Prabhu 		goto out;
1350d405a98cSShreyas B. Prabhu 	}
1351d405a98cSShreyas B. Prabhu 
13529c7b185aSAkshay Adiga 	/* Read flags */
13539c7b185aSAkshay Adiga 	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
13549c7b185aSAkshay Adiga 				       temp_u32, nr_idle_states)) {
1355d405a98cSShreyas B. Prabhu 		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
13569c7b185aSAkshay Adiga 		rc = -EINVAL;
1357bcef83a0SShreyas B. Prabhu 		goto out;
1358bcef83a0SShreyas B. Prabhu 	}
13599c7b185aSAkshay Adiga 	for (i = 0; i < nr_idle_states; i++)
13609c7b185aSAkshay Adiga 		pnv_idle_states[i].flags = temp_u32[i];
1361bcef83a0SShreyas B. Prabhu 
13629c7b185aSAkshay Adiga 	/* Read latencies */
13639c7b185aSAkshay Adiga 	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
13649c7b185aSAkshay Adiga 				       temp_u32, nr_idle_states)) {
13659c7b185aSAkshay Adiga 		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
13669c7b185aSAkshay Adiga 		rc = -EINVAL;
13679c7b185aSAkshay Adiga 		goto out;
13689c7b185aSAkshay Adiga 	}
13699c7b185aSAkshay Adiga 	for (i = 0; i < nr_idle_states; i++)
13709c7b185aSAkshay Adiga 		pnv_idle_states[i].latency_ns = temp_u32[i];
13719c7b185aSAkshay Adiga 
13729c7b185aSAkshay Adiga 	/* Read residencies */
13739c7b185aSAkshay Adiga 	if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
13749c7b185aSAkshay Adiga 				       temp_u32, nr_idle_states)) {
13752f62870cSChristophe JAILLET 		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
13769c7b185aSAkshay Adiga 		rc = -EINVAL;
13779c7b185aSAkshay Adiga 		goto out;
13789c7b185aSAkshay Adiga 	}
13799c7b185aSAkshay Adiga 	for (i = 0; i < nr_idle_states; i++)
13809c7b185aSAkshay Adiga 		pnv_idle_states[i].residency_ns = temp_u32[i];
13819c7b185aSAkshay Adiga 
1382ffd2961bSNicholas Piggin 	/* For power9 and later */
1383bcef83a0SShreyas B. Prabhu 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
13849c7b185aSAkshay Adiga 		/* Read pm_crtl_val */
13859c7b185aSAkshay Adiga 		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
13869c7b185aSAkshay Adiga 					       temp_u64, nr_idle_states)) {
13879c7b185aSAkshay Adiga 			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
13889c7b185aSAkshay Adiga 			rc = -EINVAL;
1389bcef83a0SShreyas B. Prabhu 			goto out;
1390d405a98cSShreyas B. Prabhu 		}
13919c7b185aSAkshay Adiga 		for (i = 0; i < nr_idle_states; i++)
13929c7b185aSAkshay Adiga 			pnv_idle_states[i].psscr_val = temp_u64[i];
1393d405a98cSShreyas B. Prabhu 
13949c7b185aSAkshay Adiga 		/* Read pm_crtl_mask */
13959c7b185aSAkshay Adiga 		if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
13969c7b185aSAkshay Adiga 					       temp_u64, nr_idle_states)) {
13979c7b185aSAkshay Adiga 			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
13989c7b185aSAkshay Adiga 			rc = -EINVAL;
13999c7b185aSAkshay Adiga 			goto out;
1400bcef83a0SShreyas B. Prabhu 		}
14019c7b185aSAkshay Adiga 		for (i = 0; i < nr_idle_states; i++)
14029c7b185aSAkshay Adiga 			pnv_idle_states[i].psscr_mask = temp_u64[i];
14039c7b185aSAkshay Adiga 	}
14049c7b185aSAkshay Adiga 
14059c7b185aSAkshay Adiga 	/*
14069c7b185aSAkshay Adiga 	 * power8 specific properties ibm,cpu-idle-state-pmicr-mask and
14079c7b185aSAkshay Adiga 	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
14089c7b185aSAkshay Adiga 	 * plan to use it in near future. Hence, not parsing these properties
14099c7b185aSAkshay Adiga 	 */
14109c7b185aSAkshay Adiga 
14119c7b185aSAkshay Adiga 	if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
14129c7b185aSAkshay Adiga 					  temp_string, nr_idle_states) < 0) {
14139c7b185aSAkshay Adiga 		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
14149c7b185aSAkshay Adiga 		rc = -EINVAL;
14159c7b185aSAkshay Adiga 		goto out;
14169c7b185aSAkshay Adiga 	}
14179c7b185aSAkshay Adiga 	for (i = 0; i < nr_idle_states; i++)
1418ae24ce5eSAneesh Kumar K.V 		strlcpy(pnv_idle_states[i].name, temp_string[i],
14199c7b185aSAkshay Adiga 			PNV_IDLE_NAME_LEN);
14209c7b185aSAkshay Adiga 	nr_pnv_idle_states = nr_idle_states;
14219c7b185aSAkshay Adiga 	rc = 0;
14229c7b185aSAkshay Adiga out:
14239c7b185aSAkshay Adiga 	kfree(temp_u32);
14249c7b185aSAkshay Adiga 	kfree(temp_u64);
14259c7b185aSAkshay Adiga 	kfree(temp_string);
14269c7b185aSAkshay Adiga 	return rc;
14279c7b185aSAkshay Adiga }
14289c7b185aSAkshay Adiga 
/*
 * Initialise platform idle support: set up per-CPU PACA idle fields,
 * parse idle states from the device tree, then wire up the
 * nap/sleep/winkle (pre-ISA-v3.0) or stop (ISA v3.0+) entry points.
 * Always returns 0 so the initcall succeeds even when idle is disabled.
 */
static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		p->idle_state = 0;
		/* First thread of each core starts with all thread bits set */
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else if (pvr_version_is(PVR_POWER9)) {
			/* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			/* No ER1 workaround needed for fastsleep */
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * workaround is needed to use fastsleep. Provide sysfs
			 * control to choose how this workaround has to be
			 * applied.
			 */
			device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		/* Prefer the deepest supported type for offlined CPUs */
		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
			   (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			   (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	/* Deep states need SPRs saved via the stop-api, else are disabled */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
/* Run at subsys initcall time on powernv machines only */
machine_subsys_initcall(powernv, pnv_init_idle_states);
1504