/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word since a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
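/*
 * The paca keeps the constant 0x8000 lock_token and this CPU's
 * paca_index in adjacent halfwords, ordered by endianness so that a
 * single 32-bit load at the field named below reads 0x8000 in the
 * upper half and the CPU number in the lower half.
 */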
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
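	/*
	 * The hypervisor bumps yield_count on every preempt and every
	 * dispatch of this vCPU, so an odd value means it is currently
	 * not running on a physical processor.
	 */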
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
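	/*
	 * lwarx (with the EH hint set, since this is a lock acquisition)
	 * loads and reserves the lock word; if it is 0, stwcx. stores the
	 * token, retrying from 1: if the reservation was lost, and the
	 * acquire barrier keeps the critical section from being reordered
	 * above the successful store.
	 */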
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
#endif

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries, so guard anything LPPACA-related
 * to allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
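	/*
	 * Try the atomic acquire; on failure, spin at low SMT priority
	 * (yielding to the hypervisor on shared-processor LPARs) until
	 * the lock word looks free, then retry at normal priority.
	 */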
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
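		/*
		 * Let interrupts back in (to the caller's saved state)
		 * while we spin, and disable them again before retrying
		 * the trylock.
		 */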
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
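	/* Order the critical section before the plain store that drops the lock. */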
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

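/*
 * The rwlock word is a signed count: positive values are the number of
 * readers, 0 is unlocked, and a writer holds the lock with a negative
 * token.  On 64-bit the loaded word is sign-extended so that the sign
 * tests below work on the full register.
 */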
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

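	/*
	 * addic. bumps the reader count and sets CR0; ble- bails out if
	 * the result is <= 0, i.e. the old value was negative (write-locked).
	 */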
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
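	/*
	 * Grab the whole word with the (negative) write token, but only
	 * if it is currently 0, i.e. no readers and no writer.
	 */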
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

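	/*
	 * Atomically drop the reader count; the release barrier orders
	 * the read-side critical section before the decrement.
	 */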
	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

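/*
 * Back-off hooks used while spinning on a contended lock; on shared
 * processor LPARs they give the rest of our timeslice to the holder.
 */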
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */