/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

u64 pseries_paravirt_steal_clock(int cpu);

static inline u64 paravirt_steal_clock(int cpu)
{
	return pseries_paravirt_steal_clock(cpu);
}
#endif

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to then queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif
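
/*
 * Illustration only (not part of the original header): a minimal sketch of
 * how a lock slowpath on a shared-processor LPAR might use yield_count_of()
 * and yield_to_preempted() to confer cycles to a preempted lock holder
 * instead of spinning. The lock layout below (struct example_lock, with the
 * holder CPU stored as cpu + 1 in @owner) is hypothetical, not the layout of
 * any real kernel lock.
 */
struct example_lock {
	u32 owner;		/* 0 when unlocked, else holder CPU number + 1 */
};

static inline void example_spin_yield(struct example_lock *lock)
{
	u32 val = READ_ONCE(lock->owner);
	u32 holder_cpu, yield_count;

	if (val == 0)
		return;			/* lock is free, nothing to yield to */
	holder_cpu = val - 1;

	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;			/* holder's vCPU is currently dispatched */

	/* Make sure the holder has not changed since the count was sampled. */
	smp_rmb();
	if (READ_ONCE(lock->owner) != val)
		return;			/* ownership changed, let the caller retry */

	/* Donate our remaining time slice to the preempted holder's vCPU. */
	yield_to_preempted(holder_cpu, yield_count);
}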

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptable context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */
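
/*
 * Illustration only (not part of the original header): generic locking code
 * typically consults vcpu_is_preempted() before busy-waiting on another CPU,
 * because spinning on a vCPU that has lost its physical processor is wasted
 * work. A hypothetical spin-on-owner loop might look like:
 *
 *	while (READ_ONCE(lock->owner) == owner_cpu) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			break;
 *		cpu_relax();
 *	}
 *
 * The lock->owner field and the loop above are illustrative, not part of this
 * header or of the kernel's actual lock implementations.
 */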