/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

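/*
 * True when running in a shared-processor LPAR, i.e. the hypervisor
 * time-slices the physical CPUs among partitions and a virtual CPU can
 * be preempted; false in dedicated processor mode.
 */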
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

u64 pseries_paravirt_steal_clock(int cpu);

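/*
 * Steal time hook for CONFIG_PARAVIRT_TIME_ACCOUNTING: returns the
 * accumulated time the hypervisor kept @cpu off a physical processor,
 * as reported by the pseries implementation.
 */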
static inline u64 paravirt_steal_clock(int cpu)
{
	return pseries_paravirt_steal_clock(cpu);
}
#endif

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
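/*
 * Confer the calling vCPU's remaining timeslice to @cpu via H_CONFER.
 * @yield_count is the value sampled with yield_count_of(); if the target
 * has since been dispatched again, the hypervisor treats the request as
 * stale and returns immediately.
 */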
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

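/* Make @cpu runnable again with H_PROD, e.g. after it has ceded or conferred. */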
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

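/* Confer the calling vCPU's cycles to any other vCPU the hypervisor picks. */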
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
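/*
 * Without CONFIG_PPC_SPLPAR there is no shared processor mode, so these
 * helpers must never be reached. The ___bad_* externs have no definition
 * anywhere; any call the compiler cannot eliminate becomes a link error
 * rather than a silent no-op.
 */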
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

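/*
 * Defining vcpu_is_preempted tells the generic scheduler and locking code
 * that this architecture can report whether a virtual CPU is currently
 * running; spin-wait loops (e.g. the osq and mutex/rwsem spinners) use it
 * to stop busy-waiting on a vCPU the hypervisor has preempted.
 */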
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptible context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole-core
		 * basis, so we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

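	/*
	 * Fall back to the dispatch/yield bit: if it is set, @cpu has
	 * ceded, conferred, or been preempted, so treat it as not
	 * currently running.
	 */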
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

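/*
 * Paravirt spinlock assistance is only worthwhile when vCPUs can be
 * preempted, i.e. in shared processor mode; on dedicated processor
 * partitions the native unlock path is used.
 */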
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */