/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

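/*
 * Enabled at boot when the LPAR runs in shared processor mode, i.e.
 * the hypervisor may preempt this partition's vcpus to run others.
 */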
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/*
 * The hypervisor increments the yield count whenever a vcpu is
 * dispatched or preempted, so bit 0 set (an odd count) means the vcpu
 * is currently preempted.
 */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

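/*
 * Confer the calling vcpu's remaining timeslice to the preempted
 * target vcpu via H_CONFER. The yield_count sampled earlier with
 * yield_count_of() lets the hypervisor drop the confer if the target
 * has been dispatched again in the meantime.
 */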
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

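/* Wake the target vcpu with H_PROD, marking it runnable again. */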
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
}

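/* Confer our timeslice to any preempted vcpu of this partition (-1). */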
static inline void yield_to_any(void)
{
	plpar_hcall_norets(H_CONFER, -1, 0);
}
#else
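/*
 * Fallbacks for dedicated-processor (non-SPLPAR) configurations:
 * is_shared_processor() is always false, so the yield/prod helpers
 * must never be called. The ___bad_*() externs are deliberately left
 * undefined so that any call the compiler cannot eliminate fails at
 * link time.
 */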
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

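/*
 * Used by the scheduler and locking slow paths: spinning on a lock
 * holder whose vcpu the hypervisor has preempted is wasted work, so
 * spinners check this to decide whether to yield instead.
 */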
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu = cpu_first_thread_sibling(smp_processor_id());

		/*
		 * Preemption happens at core granularity: the hypervisor
		 * dispatches and preempts whole cores. Since the calling
		 * CPU is currently running, no CPU on the same core can
		 * be preempted.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

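/*
 * The native spin unlock fast path is sufficient unless vcpus can be
 * preempted by the hypervisor; only shared processor LPARs need the
 * paravirt-assisted unlock.
 */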
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */
