12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2d405a98cSShreyas B. Prabhu /* 3d405a98cSShreyas B. Prabhu * PowerNV cpuidle code 4d405a98cSShreyas B. Prabhu * 5d405a98cSShreyas B. Prabhu * Copyright 2015 IBM Corp. 6d405a98cSShreyas B. Prabhu */ 7d405a98cSShreyas B. Prabhu 8d405a98cSShreyas B. Prabhu #include <linux/types.h> 9d405a98cSShreyas B. Prabhu #include <linux/mm.h> 10d405a98cSShreyas B. Prabhu #include <linux/slab.h> 11d405a98cSShreyas B. Prabhu #include <linux/of.h> 125703d2f4SShreyas B. Prabhu #include <linux/device.h> 135703d2f4SShreyas B. Prabhu #include <linux/cpu.h> 14d405a98cSShreyas B. Prabhu 1510d91611SNicholas Piggin #include <asm/asm-prototypes.h> 16d405a98cSShreyas B. Prabhu #include <asm/firmware.h> 17*3a96570fSNicholas Piggin #include <asm/interrupt.h> 184bece972SMichael Ellerman #include <asm/machdep.h> 19d405a98cSShreyas B. Prabhu #include <asm/opal.h> 20d405a98cSShreyas B. Prabhu #include <asm/cputhreads.h> 21d405a98cSShreyas B. Prabhu #include <asm/cpuidle.h> 22d405a98cSShreyas B. Prabhu #include <asm/code-patching.h> 23d405a98cSShreyas B. Prabhu #include <asm/smp.h> 242201f994SNicholas Piggin #include <asm/runlatch.h> 257672691aSPaul Mackerras #include <asm/dbell.h> 26d405a98cSShreyas B. Prabhu 27d405a98cSShreyas B. Prabhu #include "powernv.h" 28d405a98cSShreyas B. Prabhu #include "subcore.h" 29d405a98cSShreyas B. Prabhu 30bcef83a0SShreyas B. Prabhu /* Power ISA 3.0 allows for stop states 0x0 - 0xF */ 31bcef83a0SShreyas B. Prabhu #define MAX_STOP_STATE 0xF 32bcef83a0SShreyas B. Prabhu 331e1601b3SAkshay Adiga #define P9_STOP_SPR_MSR 2000 341e1601b3SAkshay Adiga #define P9_STOP_SPR_PSSCR 855 351e1601b3SAkshay Adiga 36d405a98cSShreyas B. Prabhu static u32 supported_cpuidle_states; 379c7b185aSAkshay Adiga struct pnv_idle_states_t *pnv_idle_states; 389c7b185aSAkshay Adiga int nr_pnv_idle_states; 39d405a98cSShreyas B. 
Prabhu 401e1601b3SAkshay Adiga /* 411e1601b3SAkshay Adiga * The default stop state that will be used by ppc_md.power_save 421e1601b3SAkshay Adiga * function on platforms that support stop instruction. 431e1601b3SAkshay Adiga */ 441e1601b3SAkshay Adiga static u64 pnv_default_stop_val; 451e1601b3SAkshay Adiga static u64 pnv_default_stop_mask; 461e1601b3SAkshay Adiga static bool default_stop_found; 471e1601b3SAkshay Adiga 481e1601b3SAkshay Adiga /* 4910d91611SNicholas Piggin * First stop state levels when SPR and TB loss can occur. 501e1601b3SAkshay Adiga */ 5110d91611SNicholas Piggin static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1; 52dcbbfa6bSPratik Rajesh Sampat static u64 deep_spr_loss_state = MAX_STOP_STATE + 1; 531e1601b3SAkshay Adiga 541e1601b3SAkshay Adiga /* 551e1601b3SAkshay Adiga * psscr value and mask of the deepest stop idle state. 561e1601b3SAkshay Adiga * Used when a cpu is offlined. 571e1601b3SAkshay Adiga */ 581e1601b3SAkshay Adiga static u64 pnv_deepest_stop_psscr_val; 591e1601b3SAkshay Adiga static u64 pnv_deepest_stop_psscr_mask; 60785a12afSGautham R. Shenoy static u64 pnv_deepest_stop_flag; 611e1601b3SAkshay Adiga static bool deepest_stop_found; 621e1601b3SAkshay Adiga 6310d91611SNicholas Piggin static unsigned long power7_offline_type; 6410d91611SNicholas Piggin 65bcef83a0SShreyas B. Prabhu static int pnv_save_sprs_for_deep_states(void) 66d405a98cSShreyas B. Prabhu { 67d405a98cSShreyas B. Prabhu int cpu; 68d405a98cSShreyas B. Prabhu int rc; 69d405a98cSShreyas B. Prabhu 70d405a98cSShreyas B. Prabhu /* 71446957baSAdam Buchbinder * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across 72d405a98cSShreyas B. Prabhu * all cpus at boot. Get these reg values of current cpu and use the 73446957baSAdam Buchbinder * same across all cpus. 74d405a98cSShreyas B. Prabhu */ 7524be85a2SGautham R. Shenoy uint64_t lpcr_val = mfspr(SPRN_LPCR); 76d405a98cSShreyas B. Prabhu uint64_t hid0_val = mfspr(SPRN_HID0); 77d405a98cSShreyas B. 
Prabhu uint64_t hmeer_val = mfspr(SPRN_HMEER); 781e1601b3SAkshay Adiga uint64_t msr_val = MSR_IDLE; 791e1601b3SAkshay Adiga uint64_t psscr_val = pnv_deepest_stop_psscr_val; 80d405a98cSShreyas B. Prabhu 81ac9816dcSAkshay Adiga for_each_present_cpu(cpu) { 82d405a98cSShreyas B. Prabhu uint64_t pir = get_hard_smp_processor_id(cpu); 83d2e60075SNicholas Piggin uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu]; 84d405a98cSShreyas B. Prabhu 85d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val); 86d405a98cSShreyas B. Prabhu if (rc != 0) 87d405a98cSShreyas B. Prabhu return rc; 88d405a98cSShreyas B. Prabhu 89d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); 90d405a98cSShreyas B. Prabhu if (rc != 0) 91d405a98cSShreyas B. Prabhu return rc; 92d405a98cSShreyas B. Prabhu 931e1601b3SAkshay Adiga if (cpu_has_feature(CPU_FTR_ARCH_300)) { 941e1601b3SAkshay Adiga rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val); 951e1601b3SAkshay Adiga if (rc) 961e1601b3SAkshay Adiga return rc; 971e1601b3SAkshay Adiga 981e1601b3SAkshay Adiga rc = opal_slw_set_reg(pir, 991e1601b3SAkshay Adiga P9_STOP_SPR_PSSCR, psscr_val); 1001e1601b3SAkshay Adiga 1011e1601b3SAkshay Adiga if (rc) 1021e1601b3SAkshay Adiga return rc; 1031e1601b3SAkshay Adiga } 1041e1601b3SAkshay Adiga 105d405a98cSShreyas B. Prabhu /* HIDs are per core registers */ 106d405a98cSShreyas B. Prabhu if (cpu_thread_in_core(cpu) == 0) { 107d405a98cSShreyas B. Prabhu 108d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val); 109d405a98cSShreyas B. Prabhu if (rc != 0) 110d405a98cSShreyas B. Prabhu return rc; 111d405a98cSShreyas B. Prabhu 112d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val); 113d405a98cSShreyas B. Prabhu if (rc != 0) 114d405a98cSShreyas B. Prabhu return rc; 115d405a98cSShreyas B. 
Prabhu 1161e1601b3SAkshay Adiga /* Only p8 needs to set extra HID regiters */ 1171e1601b3SAkshay Adiga if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 1185c92fb1bSPratik Rajesh Sampat uint64_t hid1_val = mfspr(SPRN_HID1); 1195c92fb1bSPratik Rajesh Sampat uint64_t hid4_val = mfspr(SPRN_HID4); 1205c92fb1bSPratik Rajesh Sampat uint64_t hid5_val = mfspr(SPRN_HID5); 1211e1601b3SAkshay Adiga 122d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val); 123d405a98cSShreyas B. Prabhu if (rc != 0) 124d405a98cSShreyas B. Prabhu return rc; 125d405a98cSShreyas B. Prabhu 126d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val); 127d405a98cSShreyas B. Prabhu if (rc != 0) 128d405a98cSShreyas B. Prabhu return rc; 129d405a98cSShreyas B. Prabhu 130d405a98cSShreyas B. Prabhu rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val); 131d405a98cSShreyas B. Prabhu if (rc != 0) 132d405a98cSShreyas B. Prabhu return rc; 133d405a98cSShreyas B. Prabhu } 134d405a98cSShreyas B. Prabhu } 1351e1601b3SAkshay Adiga } 136d405a98cSShreyas B. Prabhu 137d405a98cSShreyas B. Prabhu return 0; 138d405a98cSShreyas B. Prabhu } 139d405a98cSShreyas B. Prabhu 140d405a98cSShreyas B. Prabhu u32 pnv_get_supported_cpuidle_states(void) 141d405a98cSShreyas B. Prabhu { 142d405a98cSShreyas B. Prabhu return supported_cpuidle_states; 143d405a98cSShreyas B. Prabhu } 144d405a98cSShreyas B. Prabhu EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states); 145d405a98cSShreyas B. Prabhu 1465703d2f4SShreyas B. Prabhu static void pnv_fastsleep_workaround_apply(void *info) 1475703d2f4SShreyas B. Prabhu 1485703d2f4SShreyas B. Prabhu { 1495703d2f4SShreyas B. Prabhu int rc; 1505703d2f4SShreyas B. Prabhu int *err = info; 1515703d2f4SShreyas B. Prabhu 1525703d2f4SShreyas B. Prabhu rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP, 1535703d2f4SShreyas B. Prabhu OPAL_CONFIG_IDLE_APPLY); 1545703d2f4SShreyas B. Prabhu if (rc) 1555703d2f4SShreyas B. Prabhu *err = 1; 1565703d2f4SShreyas B. 
Prabhu } 1575703d2f4SShreyas B. Prabhu 15810d91611SNicholas Piggin static bool power7_fastsleep_workaround_entry = true; 15910d91611SNicholas Piggin static bool power7_fastsleep_workaround_exit = true; 16010d91611SNicholas Piggin 1615703d2f4SShreyas B. Prabhu /* 1625703d2f4SShreyas B. Prabhu * Used to store fastsleep workaround state 1635703d2f4SShreyas B. Prabhu * 0 - Workaround applied/undone at fastsleep entry/exit path (Default) 1645703d2f4SShreyas B. Prabhu * 1 - Workaround applied once, never undone. 1655703d2f4SShreyas B. Prabhu */ 1665703d2f4SShreyas B. Prabhu static u8 fastsleep_workaround_applyonce; 1675703d2f4SShreyas B. Prabhu 1685703d2f4SShreyas B. Prabhu static ssize_t show_fastsleep_workaround_applyonce(struct device *dev, 1695703d2f4SShreyas B. Prabhu struct device_attribute *attr, char *buf) 1705703d2f4SShreyas B. Prabhu { 1715703d2f4SShreyas B. Prabhu return sprintf(buf, "%u\n", fastsleep_workaround_applyonce); 1725703d2f4SShreyas B. Prabhu } 1735703d2f4SShreyas B. Prabhu 1745703d2f4SShreyas B. Prabhu static ssize_t store_fastsleep_workaround_applyonce(struct device *dev, 1755703d2f4SShreyas B. Prabhu struct device_attribute *attr, const char *buf, 1765703d2f4SShreyas B. Prabhu size_t count) 1775703d2f4SShreyas B. Prabhu { 1785703d2f4SShreyas B. Prabhu cpumask_t primary_thread_mask; 1795703d2f4SShreyas B. Prabhu int err; 1805703d2f4SShreyas B. Prabhu u8 val; 1815703d2f4SShreyas B. Prabhu 1825703d2f4SShreyas B. Prabhu if (kstrtou8(buf, 0, &val) || val != 1) 1835703d2f4SShreyas B. Prabhu return -EINVAL; 1845703d2f4SShreyas B. Prabhu 1855703d2f4SShreyas B. Prabhu if (fastsleep_workaround_applyonce == 1) 1865703d2f4SShreyas B. Prabhu return count; 1875703d2f4SShreyas B. Prabhu 1885703d2f4SShreyas B. Prabhu /* 1895703d2f4SShreyas B. Prabhu * fastsleep_workaround_applyonce = 1 implies 1905703d2f4SShreyas B. Prabhu * fastsleep workaround needs to be left in 'applied' state on all 1915703d2f4SShreyas B. Prabhu * the cores. 
Do this by- 19210d91611SNicholas Piggin * 1. Disable the 'undo' workaround in fastsleep exit path 19310d91611SNicholas Piggin * 2. Sendi IPIs to all the cores which have at least one online thread 19410d91611SNicholas Piggin * 3. Disable the 'apply' workaround in fastsleep entry path 19510d91611SNicholas Piggin * 1965703d2f4SShreyas B. Prabhu * There is no need to send ipi to cores which have all threads 1975703d2f4SShreyas B. Prabhu * offlined, as last thread of the core entering fastsleep or deeper 1985703d2f4SShreyas B. Prabhu * state would have applied workaround. 1995703d2f4SShreyas B. Prabhu */ 20010d91611SNicholas Piggin power7_fastsleep_workaround_exit = false; 2015703d2f4SShreyas B. Prabhu 2025703d2f4SShreyas B. Prabhu get_online_cpus(); 2035703d2f4SShreyas B. Prabhu primary_thread_mask = cpu_online_cores_map(); 2045703d2f4SShreyas B. Prabhu on_each_cpu_mask(&primary_thread_mask, 2055703d2f4SShreyas B. Prabhu pnv_fastsleep_workaround_apply, 2065703d2f4SShreyas B. Prabhu &err, 1); 2075703d2f4SShreyas B. Prabhu put_online_cpus(); 2085703d2f4SShreyas B. Prabhu if (err) { 2095703d2f4SShreyas B. Prabhu pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply"); 2105703d2f4SShreyas B. Prabhu goto fail; 2115703d2f4SShreyas B. Prabhu } 2125703d2f4SShreyas B. Prabhu 21310d91611SNicholas Piggin power7_fastsleep_workaround_entry = false; 2145703d2f4SShreyas B. Prabhu 2155703d2f4SShreyas B. Prabhu fastsleep_workaround_applyonce = 1; 2165703d2f4SShreyas B. Prabhu 2175703d2f4SShreyas B. Prabhu return count; 2185703d2f4SShreyas B. Prabhu fail: 2195703d2f4SShreyas B. Prabhu return -EIO; 2205703d2f4SShreyas B. Prabhu } 2215703d2f4SShreyas B. Prabhu 2225703d2f4SShreyas B. Prabhu static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600, 2235703d2f4SShreyas B. Prabhu show_fastsleep_workaround_applyonce, 2245703d2f4SShreyas B. Prabhu store_fastsleep_workaround_applyonce); 2255703d2f4SShreyas B. 
/*
 * Per-core idle-state bookkeeping lives in paca_ptrs[first]->idle_state:
 * one bit per sibling thread (clear = thread is idle) plus a lock bit
 * and winkle counters.  The helpers below manipulate it atomically.
 */

/* Mark this thread as entering idle (clear its bit in the core state). */
static inline void atomic_start_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	clear_bit(thread_nr, state);
}

/* Mark this thread as no longer idle (set its bit in the core state). */
static inline void atomic_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	int thread_nr = cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	set_bit(thread_nr, state);
}

/* Spin until the per-core idle-state lock bit is acquired. */
static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
		barrier();
}

/*
 * Atomically set this thread's "not idle" bit and drop the lock bit in
 * a single cmpxchg, retrying on contention.
 */
static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	/* Caller must hold the lock, and this thread must be marked idle */
	BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		s = tmp;
		goto again;
	}
}

/* Release the per-core idle-state lock bit (must be held). */
static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
}

/* P7 and P8 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

/*
 * Enter a P7/P8 idle state (@type is PNV_THREAD_NAP/SLEEP/WINKLE) and
 * handle the wakeup: save SPRs that the chosen state can lose, execute
 * the idle instruction, then restore per-core/per-subcore/per-thread
 * state as dictated by the SRR1 wake reason.  Runs with the MMU off on
 * the state-loss paths.  Returns the SRR1 value seen at wakeup.
 */
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			/* Last thread of the core going idle: apply it */
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			/* Winkle can lose per-core/subcore/thread SPRs */
			sprs.tscr	= mfspr(SPRN_TSCR);
			sprs.worc	= mfspr(SPRN_WORC);

			sprs.sdr1	= mfspr(SPRN_SDR1);
			sprs.rpr	= mfspr(SPRN_RPR);

			sprs.lpcr	= mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr	= mfspr(SPRN_HFSCR);
				sprs.fscr	= mfspr(SPRN_FSCR);
			}
			sprs.purr	= mfspr(SPRN_PURR);
			sprs.spurr	= mfspr(SPRN_SPURR);
			sprs.dscr	= mfspr(SPRN_DSCR);
			sprs.wort	= mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr	= mfspr(SPRN_AMR);
		sprs.iamr	= mfspr(SPRN_IAMR);
		sprs.amor	= mfspr(SPRN_AMOR);
		sprs.uamor	= mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR,		sprs.amr);
			mtspr(SPRN_IAMR,	sprs.iamr);
			mtspr(SPRN_AMOR,	sprs.amor);
			mtspr(SPRN_UAMOR,	sprs.uamor);
		}
	}

	/* Hypervisor Maintenance Interrupt woke us: handle it in realmode */
	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		/* No hypervisor state lost: just undo the bookkeeping */
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	/* Another thread already restored the per-core state */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR,	sprs.tscr);
		mtspr(SPRN_WORC,	sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	/* Another thread in the subcore already restored these */
	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1,	sprs.sdr1);
	mtspr(SPRN_RPR,		sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR,	sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR,	sprs.hfscr);
		mtspr(SPRN_FSCR,	sprs.fscr);
	}
	mtspr(SPRN_PURR,	sprs.purr);
	mtspr(SPRN_SPURR,	sprs.spurr);
	mtspr(SPRN_DSCR,	sprs.dscr);
	mtspr(SPRN_WORT,	sprs.wort);

	mtspr(SPRN_SPRG3,	local_paca->sprg_vdso);

	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();

	return srr1;
}

extern unsigned long idle_kvm_start_guest(unsigned long srr1);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Put an offlined CPU into the configured deep idle state
 * (power7_offline_type), coordinating with KVM which may grab this
 * hardware thread for a guest while it is idle.  Returns the wakeup
 * SRR1 value.
 */
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif

/*
 * Enter a P7/P8 idle state of the given PNV_THREAD_* type from the
 * idle loop, with interrupt state prepared for idle entry and any
 * interrupt that woke us replayed afterwards.
 */
void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}

static void power7_idle(void)
{
	/* Nap is only entered when enabled via powersave_nap */
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}

/* POWER9 SPRs saved/restored around stop states that can lose state */
struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;
	u64 ciabr;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

/*
 * Enter a POWER9 stop state with the given PSSCR value.
 * NOTE(review): function continues below — @mmu_on semantics and the
 * wakeup/restore path are handled in the remainder of the body.
 */
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
6161cade527SAthira Rajeev unsigned long mmcra = 0; 61710d91611SNicholas Piggin struct p9_sprs sprs = {}; /* avoid false used-uninitialised */ 61810d91611SNicholas Piggin bool sprs_saved = false; 61910d91611SNicholas Piggin 62010d91611SNicholas Piggin if (!(psscr & (PSSCR_EC|PSSCR_ESL))) { 62110d91611SNicholas Piggin /* EC=ESL=0 case */ 62210d91611SNicholas Piggin 62310d91611SNicholas Piggin BUG_ON(!mmu_on); 62410d91611SNicholas Piggin 62510d91611SNicholas Piggin /* 62610d91611SNicholas Piggin * Wake synchronously. SRESET via xscom may still cause 62710d91611SNicholas Piggin * a 0x100 powersave wakeup with SRR1 reason! 62810d91611SNicholas Piggin */ 62910d91611SNicholas Piggin srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ 63010d91611SNicholas Piggin if (likely(!srr1)) 63110d91611SNicholas Piggin return 0; 63210d91611SNicholas Piggin 63310d91611SNicholas Piggin /* 63410d91611SNicholas Piggin * Registers not saved, can't recover! 63510d91611SNicholas Piggin * This would be a hardware bug 63610d91611SNicholas Piggin */ 63710d91611SNicholas Piggin BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS); 63810d91611SNicholas Piggin 63910d91611SNicholas Piggin goto out; 64010d91611SNicholas Piggin } 64110d91611SNicholas Piggin 64210d91611SNicholas Piggin /* EC=ESL=1 case */ 64310d91611SNicholas Piggin #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 64410d91611SNicholas Piggin if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) { 64510d91611SNicholas Piggin local_paca->requested_psscr = psscr; 64610d91611SNicholas Piggin /* order setting requested_psscr vs testing dont_stop */ 64710d91611SNicholas Piggin smp_mb(); 64810d91611SNicholas Piggin if (atomic_read(&local_paca->dont_stop)) { 64910d91611SNicholas Piggin local_paca->requested_psscr = 0; 65010d91611SNicholas Piggin return 0; 65110d91611SNicholas Piggin } 65210d91611SNicholas Piggin } 65310d91611SNicholas Piggin #endif 65410d91611SNicholas Piggin 65510d91611SNicholas Piggin if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) { 
65610d91611SNicholas Piggin /* 65710d91611SNicholas Piggin * POWER9 DD2 can incorrectly set PMAO when waking up 65810d91611SNicholas Piggin * after a state-loss idle. Saving and restoring MMCR0 65910d91611SNicholas Piggin * over idle is a workaround. 66010d91611SNicholas Piggin */ 66110d91611SNicholas Piggin mmcr0 = mfspr(SPRN_MMCR0); 66210d91611SNicholas Piggin } 6631cade527SAthira Rajeev 664dcbbfa6bSPratik Rajesh Sampat if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { 66510d91611SNicholas Piggin sprs.lpcr = mfspr(SPRN_LPCR); 66610d91611SNicholas Piggin sprs.hfscr = mfspr(SPRN_HFSCR); 66710d91611SNicholas Piggin sprs.fscr = mfspr(SPRN_FSCR); 66810d91611SNicholas Piggin sprs.pid = mfspr(SPRN_PID); 66910d91611SNicholas Piggin sprs.purr = mfspr(SPRN_PURR); 67010d91611SNicholas Piggin sprs.spurr = mfspr(SPRN_SPURR); 67110d91611SNicholas Piggin sprs.dscr = mfspr(SPRN_DSCR); 67210d91611SNicholas Piggin sprs.wort = mfspr(SPRN_WORT); 673250ad7a4SJordan Niethe sprs.ciabr = mfspr(SPRN_CIABR); 67410d91611SNicholas Piggin 67510d91611SNicholas Piggin sprs.mmcra = mfspr(SPRN_MMCRA); 67610d91611SNicholas Piggin sprs.mmcr0 = mfspr(SPRN_MMCR0); 67710d91611SNicholas Piggin sprs.mmcr1 = mfspr(SPRN_MMCR1); 67810d91611SNicholas Piggin sprs.mmcr2 = mfspr(SPRN_MMCR2); 67910d91611SNicholas Piggin 68010d91611SNicholas Piggin sprs.ptcr = mfspr(SPRN_PTCR); 68110d91611SNicholas Piggin sprs.rpr = mfspr(SPRN_RPR); 68210d91611SNicholas Piggin sprs.tscr = mfspr(SPRN_TSCR); 683512a5a64SClaudio Carvalho if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) 68410d91611SNicholas Piggin sprs.ldbar = mfspr(SPRN_LDBAR); 68510d91611SNicholas Piggin 68610d91611SNicholas Piggin sprs_saved = true; 68710d91611SNicholas Piggin 68810d91611SNicholas Piggin atomic_start_thread_idle(); 68910d91611SNicholas Piggin } 69010d91611SNicholas Piggin 691e9cef018SMichael Ellerman sprs.amr = mfspr(SPRN_AMR); 692e9cef018SMichael Ellerman sprs.iamr = mfspr(SPRN_IAMR); 693e9cef018SMichael Ellerman sprs.amor = 
mfspr(SPRN_AMOR); 694e9cef018SMichael Ellerman sprs.uamor = mfspr(SPRN_UAMOR); 695e9cef018SMichael Ellerman 69610d91611SNicholas Piggin srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ 69710d91611SNicholas Piggin 69810d91611SNicholas Piggin #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 69910d91611SNicholas Piggin local_paca->requested_psscr = 0; 70010d91611SNicholas Piggin #endif 70110d91611SNicholas Piggin 70210d91611SNicholas Piggin psscr = mfspr(SPRN_PSSCR); 70310d91611SNicholas Piggin 70410d91611SNicholas Piggin WARN_ON_ONCE(!srr1); 70510d91611SNicholas Piggin WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); 70610d91611SNicholas Piggin 70710d91611SNicholas Piggin if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) { 70810d91611SNicholas Piggin /* 709e9cef018SMichael Ellerman * We don't need an isync after the mtsprs here because the 710e9cef018SMichael Ellerman * upcoming mtmsrd is execution synchronizing. 711e9cef018SMichael Ellerman */ 712e9cef018SMichael Ellerman mtspr(SPRN_AMR, sprs.amr); 713e9cef018SMichael Ellerman mtspr(SPRN_IAMR, sprs.iamr); 714e9cef018SMichael Ellerman mtspr(SPRN_AMOR, sprs.amor); 715e9cef018SMichael Ellerman mtspr(SPRN_UAMOR, sprs.uamor); 716e9cef018SMichael Ellerman 717e9cef018SMichael Ellerman /* 71810d91611SNicholas Piggin * Workaround for POWER9 DD2.0, if we lost resources, the ERAT 71910d91611SNicholas Piggin * might have been corrupted and needs flushing. We also need 72010d91611SNicholas Piggin * to reload MMCR0 (see mmcr0 comment above). 72110d91611SNicholas Piggin */ 72210d91611SNicholas Piggin if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) { 723fe7946ceSNicholas Piggin asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT); 72410d91611SNicholas Piggin mtspr(SPRN_MMCR0, mmcr0); 72510d91611SNicholas Piggin } 72610d91611SNicholas Piggin 72710d91611SNicholas Piggin /* 72810d91611SNicholas Piggin * DD2.2 and earlier need to set then clear bit 60 in MMCRA 72910d91611SNicholas Piggin * to ensure the PMU starts running. 
73010d91611SNicholas Piggin */ 73110d91611SNicholas Piggin mmcra = mfspr(SPRN_MMCRA); 73210d91611SNicholas Piggin mmcra |= PPC_BIT(60); 73310d91611SNicholas Piggin mtspr(SPRN_MMCRA, mmcra); 73410d91611SNicholas Piggin mmcra &= ~PPC_BIT(60); 73510d91611SNicholas Piggin mtspr(SPRN_MMCRA, mmcra); 73610d91611SNicholas Piggin } 73710d91611SNicholas Piggin 73810d91611SNicholas Piggin if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI)) 73910d91611SNicholas Piggin hmi_exception_realmode(NULL); 74010d91611SNicholas Piggin 74110d91611SNicholas Piggin /* 74210d91611SNicholas Piggin * On POWER9, SRR1 bits do not match exactly as expected. 74310d91611SNicholas Piggin * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so 74410d91611SNicholas Piggin * just always test PSSCR for SPR/TB state loss. 74510d91611SNicholas Piggin */ 74610d91611SNicholas Piggin pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT; 747dcbbfa6bSPratik Rajesh Sampat if (likely(pls < deep_spr_loss_state)) { 74810d91611SNicholas Piggin if (sprs_saved) 74910d91611SNicholas Piggin atomic_stop_thread_idle(); 75010d91611SNicholas Piggin goto out; 75110d91611SNicholas Piggin } 75210d91611SNicholas Piggin 75310d91611SNicholas Piggin /* HV state loss */ 75410d91611SNicholas Piggin BUG_ON(!sprs_saved); 75510d91611SNicholas Piggin 75610d91611SNicholas Piggin atomic_lock_thread_idle(); 75710d91611SNicholas Piggin 75810d91611SNicholas Piggin if ((*state & core_thread_mask) != 0) 75910d91611SNicholas Piggin goto core_woken; 76010d91611SNicholas Piggin 76110d91611SNicholas Piggin /* Per-core SPRs */ 76210d91611SNicholas Piggin mtspr(SPRN_PTCR, sprs.ptcr); 76310d91611SNicholas Piggin mtspr(SPRN_RPR, sprs.rpr); 76410d91611SNicholas Piggin mtspr(SPRN_TSCR, sprs.tscr); 76510d91611SNicholas Piggin 76610d91611SNicholas Piggin if (pls >= pnv_first_tb_loss_level) { 76710d91611SNicholas Piggin /* TB loss */ 76810d91611SNicholas Piggin if (opal_resync_timebase() != OPAL_SUCCESS) 76910d91611SNicholas Piggin BUG(); 
77010d91611SNicholas Piggin } 77110d91611SNicholas Piggin 77210d91611SNicholas Piggin /* 77310d91611SNicholas Piggin * isync after restoring shared SPRs and before unlocking. Unlock 77410d91611SNicholas Piggin * only contains hwsync which does not necessarily do the right 77510d91611SNicholas Piggin * thing for SPRs. 77610d91611SNicholas Piggin */ 77710d91611SNicholas Piggin isync(); 77810d91611SNicholas Piggin 77910d91611SNicholas Piggin core_woken: 78010d91611SNicholas Piggin atomic_unlock_and_stop_thread_idle(); 78110d91611SNicholas Piggin 78210d91611SNicholas Piggin /* Per-thread SPRs */ 78310d91611SNicholas Piggin mtspr(SPRN_LPCR, sprs.lpcr); 78410d91611SNicholas Piggin mtspr(SPRN_HFSCR, sprs.hfscr); 78510d91611SNicholas Piggin mtspr(SPRN_FSCR, sprs.fscr); 78610d91611SNicholas Piggin mtspr(SPRN_PID, sprs.pid); 78710d91611SNicholas Piggin mtspr(SPRN_PURR, sprs.purr); 78810d91611SNicholas Piggin mtspr(SPRN_SPURR, sprs.spurr); 78910d91611SNicholas Piggin mtspr(SPRN_DSCR, sprs.dscr); 79010d91611SNicholas Piggin mtspr(SPRN_WORT, sprs.wort); 791250ad7a4SJordan Niethe mtspr(SPRN_CIABR, sprs.ciabr); 79210d91611SNicholas Piggin 79310d91611SNicholas Piggin mtspr(SPRN_MMCRA, sprs.mmcra); 79410d91611SNicholas Piggin mtspr(SPRN_MMCR0, sprs.mmcr0); 79510d91611SNicholas Piggin mtspr(SPRN_MMCR1, sprs.mmcr1); 79610d91611SNicholas Piggin mtspr(SPRN_MMCR2, sprs.mmcr2); 797512a5a64SClaudio Carvalho if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) 798f5a9e488SAthira Rajeev mtspr(SPRN_LDBAR, sprs.ldbar); 79910d91611SNicholas Piggin 80010d91611SNicholas Piggin mtspr(SPRN_SPRG3, local_paca->sprg_vdso); 80110d91611SNicholas Piggin 80210d91611SNicholas Piggin if (!radix_enabled()) 80310d91611SNicholas Piggin __slb_restore_bolted_realmode(); 80410d91611SNicholas Piggin 80510d91611SNicholas Piggin out: 80610d91611SNicholas Piggin if (mmu_on) 80710d91611SNicholas Piggin mtmsr(MSR_KERNEL); 80810d91611SNicholas Piggin 80910d91611SNicholas Piggin return srr1; 81010d91611SNicholas Piggin } 

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 in order by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 *
 * Pairs with pnv_power9_force_smt4_release() below, which drops the
 * dont_stop holds taken here.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	/* Ask every sibling thread not to enter stop from now on */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

/*
 * Undo pnv_power9_force_smt4_catch(): allow the sibling threads of this
 * core to enter stop again. Must be called with preemption disabled,
 * balanced 1:1 with a prior _catch() on the same core.
 */
void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
	 * isa300 idle routines restore CR, LR.
	 * CTR is volatile
	 * idle thread doesn't use FP or VEC
	 * kernel doesn't use TAR
	 * HSPRG1 is only live in HV interrupt entry
	 * SPRG2 is only live in KVM guests, KVM handles it.
	 */
};

/*
 * Enter a stop state on POWER10 and handle wakeup. Mirrors
 * power9_idle_stop() but the deep-state SPR save/restore is not yet
 * implemented (see the XXX markers); deep-loss states are filtered out
 * for P10 in pnv_arch300_idle_init().
 *
 * @psscr:  PSSCR value to request.
 * @mmu_on: re-enable IR/DR before returning when true.
 *
 * Returns the wakeup SRR1 value, or 0 for a synchronous wakeup in the
 * EC=ESL=0 case.
 */
static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		BUG_ON(!mmu_on);

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr);		/* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr);		/* go idle */

	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* Another thread of the core already restored the per-core state */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	if (mmu_on)
		mtmsr(MSR_KERNEL);

	return srr1;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Idle an offlined CPU in the given stop state, dispatching to the P9 or
 * P10 driver. With KVM HV possible, also publishes the thread's idle
 * state to KVM so it can steal the thread for a guest, and stays in real
 * mode across the wakeup for that hand-off.
 *
 * Returns the wakeup SRR1 (possibly from idle_kvm_start_guest()).
 */
static unsigned long arch300_offline_stop(unsigned long psscr)
{
	unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, true);
	else
		srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();
#else
	/*
	 * Tell KVM we're entering idle.
	 * This does not have to be done in real mode because the P9 MMU
	 * is independent per-thread. Some steppings share radix/hash mode
	 * between threads, but in that case KVM has a barrier sync in real
	 * mode before and after switching between radix and hash.
	 *
	 * kvm_start_guest must still be called in real mode though, hence
	 * the false argument.
	 */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, false);
	else
		srr1 = power9_idle_stop(psscr, false);
	__ppc64_runlatch_on();

	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
	/* power*_idle_stop() was called with mmu_on=false: turn MMU back on */
	mtmsr(MSR_KERNEL);
#endif

	return srr1;
}
#endif

/*
 * cpuidle entry point for ISA 3.0 stop states. Composes the requested
 * PSSCR from the current value plus the state's val/mask, idles, then
 * replays any interrupt indicated by the wakeup SRR1.
 *
 * Called with interrupts soft-disabled; bails out if an interrupt is
 * already pending (prep_irq_for_idle_irqsoff() fails).
 */
void arch300_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr, true);
	else
		srr1 = power9_idle_stop(psscr, true);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Set this (hotplugged) CPU's LPCR, both in the register and — when the
 * deepest stop state loses full HV context — in the firmware stop-api
 * save area so it survives a deep-state wakeup.
 */
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * interrupts hard disabled and no lazy irq pending.
 *
 * Returns the wakeup SRR1 (0 for the busy-wait fallback path).
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since stop instruction was
 *	last executed.
 *
 *	Bit 41 - Status Disable(SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level
 *	Used to specify which power-saving level must be entered on executing
 *	stop instruction
 */

/*
 * Validate (and, for old firmware, fix up) a device-tree supplied
 * psscr_val/psscr_mask pair for one idle state.
 *
 * Returns 0 on success, or an ERR_* code when the firmware-provided
 * value violates an invariant (ESL != EC, or a deep state with ESL=0).
 * On the old-firmware path (mask == 0xf) the val/mask are rewritten
 * in place with the kernel defaults.
 */
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}

/*
 * pnv_arch300_idle_init: Initializes the default idle state, first
 * deep idle state and deepest idle state on
 * ISA 3.0 CPUs.
 *
 * Walks pnv_idle_states[] (parsed from the /ibm,opal/power-mgt device
 * node) to compute the first TB-loss and SPR-loss levels, validate each
 * state's PSSCR val/mask, and pick the default and deepest stop states.
 * Installs arch300_idle as ppc_md.power_save when a usable default
 * stop state exists.
 */
static void __init pnv_arch300_idle_init(void)
{
	u64 max_residency_ns = 0;
	int i;

	/* stop is not really architected, we only have p9,p10 drivers */
	if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
		return;

	/*
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
	 */
	pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
	deep_spr_loss_state = MAX_STOP_STATE + 1;
	for (i = 0; i < nr_pnv_idle_states; i++) {
		int err;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];
		u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;

		/* No deep loss driver implemented for POWER10 yet */
		if (pvr_version_is(PVR_POWER10) &&
				state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
			continue;

		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (pnv_first_tb_loss_level > psscr_rl))
			pnv_first_tb_loss_level = psscr_rl;

		if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		    (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		/*
		 * The idle code does not deal with TB loss occurring
		 * in a shallower state than SPR loss, so force it to
		 * behave like SPRs are lost if TB is lost. POWER9 would
		 * never encounter this, but a POWER8 core would if it
		 * implemented the stop instruction. So this is for forward
		 * compatibility.
		 */
		if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
		     (deep_spr_loss_state > psscr_rl))
			deep_spr_loss_state = psscr_rl;

		err = validate_psscr_val_mask(&state->psscr_val,
					      &state->psscr_mask,
					      state->flags);
		if (err) {
			report_invalid_psscr_val(state->psscr_val, err);
			continue;
		}

		state->valid = true;

		/* Deepest usable state = largest residency seen so far */
		if (max_residency_ns < state->residency_ns) {
			max_residency_ns = state->residency_ns;
			pnv_deepest_stop_psscr_val = state->psscr_val;
			pnv_deepest_stop_psscr_mask = state->psscr_mask;
			pnv_deepest_stop_flag = state->flags;
			deepest_stop_found = true;
		}

		/* First loss-less state in DT order becomes the default */
		if (!default_stop_found &&
		    (state->flags & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = state->psscr_val;
			pnv_default_stop_mask = state->psscr_mask;
			default_stop_found = true;
			WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = arch300_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
		deep_spr_loss_state);

	pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
		pnv_first_tb_loss_level);
}

static void __init pnv_disable_deep_states(void)
{
	/*
	 * The stop-api is unable to restore hypervisor
	 * resources on wakeup from platform idle states which
	 * lose full context. So disable such states.
	 */
	supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
	pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
	pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	   (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
		/*
		 * Use the default stop state for CPU-Hotplug
		 * if available.
132110d91611SNicholas Piggin */ 132210d91611SNicholas Piggin if (default_stop_found) { 132310d91611SNicholas Piggin pnv_deepest_stop_psscr_val = pnv_default_stop_val; 132410d91611SNicholas Piggin pnv_deepest_stop_psscr_mask = pnv_default_stop_mask; 132510d91611SNicholas Piggin pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n", 132610d91611SNicholas Piggin pnv_deepest_stop_psscr_val); 132710d91611SNicholas Piggin } else { /* Fallback to snooze loop for CPU-Hotplug */ 132810d91611SNicholas Piggin deepest_stop_found = false; 132910d91611SNicholas Piggin pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n"); 133010d91611SNicholas Piggin } 133110d91611SNicholas Piggin } 1332bcef83a0SShreyas B. Prabhu } 1333bcef83a0SShreyas B. Prabhu 1334bcef83a0SShreyas B. Prabhu /* 1335bcef83a0SShreyas B. Prabhu * Probe device tree for supported idle states 1336bcef83a0SShreyas B. Prabhu */ 1337bcef83a0SShreyas B. Prabhu static void __init pnv_probe_idle_states(void) 1338bcef83a0SShreyas B. Prabhu { 1339d405a98cSShreyas B. Prabhu int i; 1340d405a98cSShreyas B. Prabhu 13419c7b185aSAkshay Adiga if (nr_pnv_idle_states < 0) { 13429c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: no idle states found in the DT\n"); 13439c7b185aSAkshay Adiga return; 13449c7b185aSAkshay Adiga } 13459c7b185aSAkshay Adiga 134616d83a54SPratik Rajesh Sampat if (cpu_has_feature(CPU_FTR_ARCH_300)) 1347ffd2961bSNicholas Piggin pnv_arch300_idle_init(); 13489c7b185aSAkshay Adiga 13499c7b185aSAkshay Adiga for (i = 0; i < nr_pnv_idle_states; i++) 13509c7b185aSAkshay Adiga supported_cpuidle_states |= pnv_idle_states[i].flags; 13519c7b185aSAkshay Adiga } 13529c7b185aSAkshay Adiga 13539c7b185aSAkshay Adiga /* 13549c7b185aSAkshay Adiga * This function parses device-tree and populates all the information 13559c7b185aSAkshay Adiga * into pnv_idle_states structure. It also sets up nr_pnv_idle_states 13569c7b185aSAkshay Adiga * which is the number of cpuidle states discovered through device-tree. 
13579c7b185aSAkshay Adiga */ 13589c7b185aSAkshay Adiga 13599c7b185aSAkshay Adiga static int pnv_parse_cpuidle_dt(void) 13609c7b185aSAkshay Adiga { 13619c7b185aSAkshay Adiga struct device_node *np; 13629c7b185aSAkshay Adiga int nr_idle_states, i; 13639c7b185aSAkshay Adiga int rc = 0; 13649c7b185aSAkshay Adiga u32 *temp_u32; 13659c7b185aSAkshay Adiga u64 *temp_u64; 13669c7b185aSAkshay Adiga const char **temp_string; 13679c7b185aSAkshay Adiga 1368bcef83a0SShreyas B. Prabhu np = of_find_node_by_path("/ibm,opal/power-mgt"); 1369bcef83a0SShreyas B. Prabhu if (!np) { 1370d405a98cSShreyas B. Prabhu pr_warn("opal: PowerMgmt Node not found\n"); 13719c7b185aSAkshay Adiga return -ENODEV; 1372d405a98cSShreyas B. Prabhu } 13739c7b185aSAkshay Adiga nr_idle_states = of_property_count_u32_elems(np, 1374d405a98cSShreyas B. Prabhu "ibm,cpu-idle-state-flags"); 13759c7b185aSAkshay Adiga 13769c7b185aSAkshay Adiga pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states), 13779c7b185aSAkshay Adiga GFP_KERNEL); 13789c7b185aSAkshay Adiga temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL); 13799c7b185aSAkshay Adiga temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL); 13809c7b185aSAkshay Adiga temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL); 13819c7b185aSAkshay Adiga 13829c7b185aSAkshay Adiga if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) { 13839c7b185aSAkshay Adiga pr_err("Could not allocate memory for dt parsing\n"); 13849c7b185aSAkshay Adiga rc = -ENOMEM; 1385d405a98cSShreyas B. Prabhu goto out; 1386d405a98cSShreyas B. Prabhu } 1387d405a98cSShreyas B. Prabhu 13889c7b185aSAkshay Adiga /* Read flags */ 13899c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags", 13909c7b185aSAkshay Adiga temp_u32, nr_idle_states)) { 1391d405a98cSShreyas B. Prabhu pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); 13929c7b185aSAkshay Adiga rc = -EINVAL; 1393bcef83a0SShreyas B. 
Prabhu goto out; 1394bcef83a0SShreyas B. Prabhu } 13959c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 13969c7b185aSAkshay Adiga pnv_idle_states[i].flags = temp_u32[i]; 1397bcef83a0SShreyas B. Prabhu 13989c7b185aSAkshay Adiga /* Read latencies */ 13999c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns", 14009c7b185aSAkshay Adiga temp_u32, nr_idle_states)) { 14019c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); 14029c7b185aSAkshay Adiga rc = -EINVAL; 14039c7b185aSAkshay Adiga goto out; 14049c7b185aSAkshay Adiga } 14059c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 14069c7b185aSAkshay Adiga pnv_idle_states[i].latency_ns = temp_u32[i]; 14079c7b185aSAkshay Adiga 14089c7b185aSAkshay Adiga /* Read residencies */ 14099c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns", 14109c7b185aSAkshay Adiga temp_u32, nr_idle_states)) { 14112f62870cSChristophe JAILLET pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n"); 14129c7b185aSAkshay Adiga rc = -EINVAL; 14139c7b185aSAkshay Adiga goto out; 14149c7b185aSAkshay Adiga } 14159c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 14169c7b185aSAkshay Adiga pnv_idle_states[i].residency_ns = temp_u32[i]; 14179c7b185aSAkshay Adiga 1418ffd2961bSNicholas Piggin /* For power9 and later */ 1419bcef83a0SShreyas B. Prabhu if (cpu_has_feature(CPU_FTR_ARCH_300)) { 14209c7b185aSAkshay Adiga /* Read pm_crtl_val */ 14219c7b185aSAkshay Adiga if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr", 14229c7b185aSAkshay Adiga temp_u64, nr_idle_states)) { 14239c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n"); 14249c7b185aSAkshay Adiga rc = -EINVAL; 1425bcef83a0SShreyas B. Prabhu goto out; 1426d405a98cSShreyas B. 
Prabhu } 14279c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 14289c7b185aSAkshay Adiga pnv_idle_states[i].psscr_val = temp_u64[i]; 1429d405a98cSShreyas B. Prabhu 14309c7b185aSAkshay Adiga /* Read pm_crtl_mask */ 14319c7b185aSAkshay Adiga if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask", 14329c7b185aSAkshay Adiga temp_u64, nr_idle_states)) { 14339c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n"); 14349c7b185aSAkshay Adiga rc = -EINVAL; 14359c7b185aSAkshay Adiga goto out; 1436bcef83a0SShreyas B. Prabhu } 14379c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 14389c7b185aSAkshay Adiga pnv_idle_states[i].psscr_mask = temp_u64[i]; 14399c7b185aSAkshay Adiga } 14409c7b185aSAkshay Adiga 14419c7b185aSAkshay Adiga /* 14429c7b185aSAkshay Adiga * power8 specific properties ibm,cpu-idle-state-pmicr-mask and 14439c7b185aSAkshay Adiga * ibm,cpu-idle-state-pmicr-val were never used and there is no 14449c7b185aSAkshay Adiga * plan to use it in near future. 
Hence, not parsing these properties 14459c7b185aSAkshay Adiga */ 14469c7b185aSAkshay Adiga 14479c7b185aSAkshay Adiga if (of_property_read_string_array(np, "ibm,cpu-idle-state-names", 14489c7b185aSAkshay Adiga temp_string, nr_idle_states) < 0) { 14499c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n"); 14509c7b185aSAkshay Adiga rc = -EINVAL; 14519c7b185aSAkshay Adiga goto out; 14529c7b185aSAkshay Adiga } 14539c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++) 1454ae24ce5eSAneesh Kumar K.V strlcpy(pnv_idle_states[i].name, temp_string[i], 14559c7b185aSAkshay Adiga PNV_IDLE_NAME_LEN); 14569c7b185aSAkshay Adiga nr_pnv_idle_states = nr_idle_states; 14579c7b185aSAkshay Adiga rc = 0; 14589c7b185aSAkshay Adiga out: 14599c7b185aSAkshay Adiga kfree(temp_u32); 14609c7b185aSAkshay Adiga kfree(temp_u64); 14619c7b185aSAkshay Adiga kfree(temp_string); 14629c7b185aSAkshay Adiga return rc; 14639c7b185aSAkshay Adiga } 14649c7b185aSAkshay Adiga 1465bcef83a0SShreyas B. Prabhu static int __init pnv_init_idle_states(void) 1466bcef83a0SShreyas B. 
Prabhu { 146710d91611SNicholas Piggin int cpu; 14689c7b185aSAkshay Adiga int rc = 0; 146910d91611SNicholas Piggin 147010d91611SNicholas Piggin /* Set up PACA fields */ 147110d91611SNicholas Piggin for_each_present_cpu(cpu) { 147210d91611SNicholas Piggin struct paca_struct *p = paca_ptrs[cpu]; 147310d91611SNicholas Piggin 147410d91611SNicholas Piggin p->idle_state = 0; 147510d91611SNicholas Piggin if (cpu == cpu_first_thread_sibling(cpu)) 147610d91611SNicholas Piggin p->idle_state = (1 << threads_per_core) - 1; 147710d91611SNicholas Piggin 147810d91611SNicholas Piggin if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 147910d91611SNicholas Piggin /* P7/P8 nap */ 148010d91611SNicholas Piggin p->thread_idle_state = PNV_THREAD_RUNNING; 1481ffd2961bSNicholas Piggin } else if (pvr_version_is(PVR_POWER9)) { 1482ffd2961bSNicholas Piggin /* P9 stop workarounds */ 148310d91611SNicholas Piggin #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 148410d91611SNicholas Piggin p->requested_psscr = 0; 148510d91611SNicholas Piggin atomic_set(&p->dont_stop, 0); 148610d91611SNicholas Piggin #endif 148710d91611SNicholas Piggin } 148810d91611SNicholas Piggin } 1489bcef83a0SShreyas B. Prabhu 14909c7b185aSAkshay Adiga /* In case we error out nr_pnv_idle_states will be zero */ 14919c7b185aSAkshay Adiga nr_pnv_idle_states = 0; 149210d91611SNicholas Piggin supported_cpuidle_states = 0; 149310d91611SNicholas Piggin 1494bcef83a0SShreyas B. Prabhu if (cpuidle_disable != IDLE_NO_OVERRIDE) 1495bcef83a0SShreyas B. Prabhu goto out; 14969c7b185aSAkshay Adiga rc = pnv_parse_cpuidle_dt(); 14979c7b185aSAkshay Adiga if (rc) 14989c7b185aSAkshay Adiga return rc; 1499bcef83a0SShreyas B. Prabhu pnv_probe_idle_states(); 1500bcef83a0SShreyas B. Prabhu 150110d91611SNicholas Piggin if (!cpu_has_feature(CPU_FTR_ARCH_300)) { 1502d405a98cSShreyas B. 
Prabhu if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { 150310d91611SNicholas Piggin power7_fastsleep_workaround_entry = false; 150410d91611SNicholas Piggin power7_fastsleep_workaround_exit = false; 15055703d2f4SShreyas B. Prabhu } else { 15065703d2f4SShreyas B. Prabhu /* 15075703d2f4SShreyas B. Prabhu * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that 15085703d2f4SShreyas B. Prabhu * workaround is needed to use fastsleep. Provide sysfs 150910d91611SNicholas Piggin * control to choose how this workaround has to be 151010d91611SNicholas Piggin * applied. 15115703d2f4SShreyas B. Prabhu */ 15125703d2f4SShreyas B. Prabhu device_create_file(cpu_subsys.dev_root, 15135703d2f4SShreyas B. Prabhu &dev_attr_fastsleep_workaround_applyonce); 1514d405a98cSShreyas B. Prabhu } 15155703d2f4SShreyas B. Prabhu 151610d91611SNicholas Piggin update_subcore_sibling_mask(); 15175593e303SShreyas B. Prabhu 151810d91611SNicholas Piggin if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) { 15195593e303SShreyas B. Prabhu ppc_md.power_save = power7_idle; 152010d91611SNicholas Piggin power7_offline_type = PNV_THREAD_NAP; 152110d91611SNicholas Piggin } 152210d91611SNicholas Piggin 152310d91611SNicholas Piggin if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) && 152410d91611SNicholas Piggin (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)) 152510d91611SNicholas Piggin power7_offline_type = PNV_THREAD_WINKLE; 152610d91611SNicholas Piggin else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) || 152710d91611SNicholas Piggin (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) 152810d91611SNicholas Piggin power7_offline_type = PNV_THREAD_SLEEP; 152910d91611SNicholas Piggin } 153010d91611SNicholas Piggin 153110d91611SNicholas Piggin if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) { 153210d91611SNicholas Piggin if (pnv_save_sprs_for_deep_states()) 153310d91611SNicholas Piggin pnv_disable_deep_states(); 153410d91611SNicholas Piggin } 1535bcef83a0SShreyas B. 
Prabhu 1536d405a98cSShreyas B. Prabhu out: 1537d405a98cSShreyas B. Prabhu return 0; 1538d405a98cSShreyas B. Prabhu } 15394bece972SMichael Ellerman machine_subsys_initcall(powernv, pnv_init_idle_states); 1540