/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

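/*
 * True when this guest runs in a shared-processor LPAR, i.e. when the
 * hypervisor may preempt the partition's virtual processors.
 */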
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock would make the task queue up again behind the first
 * instance (or worse: queued spinlocks use tricks that assume a context never
 * waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
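/*
 * Confer the caller's remaining time slice to the (presumably preempted)
 * cpu we are waiting on. @yield_count should be the value previously read
 * with yield_count_of(); H_CONFER compares it, so a confer that races with
 * the target being dispatched again is effectively a no-op.
 */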
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

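/* Make a cpu that has conferred or ceded its cycles runnable again. */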
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

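/* Confer the remaining time slice with no specific target (-1 = any cpu). */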
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
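/*
 * Illustrative calling pattern only (a sketch, not code from this file):
 * a lock slowpath samples the holder's yield count and confers its time
 * slice while the holder is preempted; the unlock path prods a waiter
 * that has yielded:
 *
 *	u32 yc = yield_count_of(holder_cpu);
 *	if (yc & 1)				// holder is preempted
 *		yield_to_preempted(holder_cpu, yc);
 *	...
 *	prod_cpu(waiter_cpu);			// when releasing the lock
 *
 * holder_cpu and waiter_cpu stand in for whatever bookkeeping the lock
 * implementation keeps; they are not defined here.
 */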
#else
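/*
 * Non-shared-processor configs must never reach the yield/prod helpers.
 * The ___bad_* functions are deliberately left undefined so that any
 * call site which is not optimised away fails at link time.
 */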
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

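/*
 * Used by spin-wait loops (e.g. the generic owner-spinning code) to stop
 * burning cycles on a cpu whose virtual processor the hypervisor has
 * preempted, since spinning on it cannot make progress.
 */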
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu = cpu_first_thread_sibling(smp_processor_id());

		/*
		 * Preemption can only happen at core granularity. The
		 * calling CPU is obviously running, so if @cpu is a
		 * thread of the same core it cannot be preempted.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

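/*
 * Sketch of the intended use (illustration only, not code from this
 * file): an owner-spin loop keeps spinning only while the owner's
 * virtual processor is actually running:
 *
 *	while (owner_holds_lock(lock)) {
 *		if (vcpu_is_preempted(owner_cpu(lock)))
 *			break;		// owner can't run, stop spinning
 *		cpu_relax();
 *	}
 *
 * owner_holds_lock() and owner_cpu() are hypothetical helpers used only
 * to show the calling pattern.
 */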
/*
 * Without shared processors there is never a yielded waiter to prod, so
 * the native spin unlock path needs no paravirt assistance.
 */
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */