/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

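/*
 * Returns true when running as a guest on a shared-processor LPAR,
 * i.e. when the hypervisor may preempt this partition's virtual CPUs.
 * Keyed off the shared_processor static key, so the check compiles to
 * a patched branch and costs nothing on dedicated-processor systems.
 */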
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);

	return be32_to_cpu(yield_count);
}

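/*
 * Confer the calling vCPU's remaining cycles to @cpu via the H_CONFER
 * hcall. @yield_count should be the (odd) value observed when @cpu was
 * seen preempted; the hypervisor treats the confer as a no-op if the
 * target has been dispatched again since then.
 */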
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

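/*
 * Make @cpu runnable again with the H_PROD hcall, waking it if it has
 * ceded or conferred its cycles to the hypervisor.
 */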
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
}

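/*
 * Confer the calling vCPU's cycles without naming a target (-1): the
 * hypervisor may run any preempted vCPU of this partition instead.
 */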
static inline void yield_to_any(void)
{
	plpar_hcall_norets(H_CONFER, -1, 0);
}
#else
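/*
 * Non-shared-processor stubs. The ___bad_*() helpers are declared but
 * never defined, so any call the compiler cannot prove dead becomes a
 * link-time error instead of a silent no-op.
 */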
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

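/*
 * vcpu_is_preempted() lets spin-wait callers (e.g. the osq/rwsem/mutex
 * spin-on-owner heuristics) decide whether it is worth continuing to
 * spin while the lock owner runs on @cpu.
 *
 * A minimal sketch of the intended use, assuming a caller that knows
 * the owner's CPU (illustrative only, not the actual lock code):
 *
 *	while (!trylock(lock)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			yield_to_preempted(owner_cpu,
 *					   yield_count_of(owner_cpu));
 *	}
 */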
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu = cpu_first_thread_sibling(smp_processor_id());

		/*
		 * The hypervisor dispatches and preempts guests at core
		 * granularity, so the target CPU cannot be preempted if
		 * it shares a core with this (currently running) CPU.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

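/*
 * Report whether the native spinlock unlock path is in effect; the
 * paravirt variant is only wanted when the hypervisor can preempt
 * vCPUs (shared processor mode).
 */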
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */