/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An unsigned int is used for the lock word; a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x80000000 | yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
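
/*
 * Illustrative values (a sketch, not part of the original source):
 * the paca keeps the constant 0x8000 lock_token next to the u16
 * paca_index, so the u32 load above yields 0x80000000 | cpu in a
 * single instruction rather than an OR at lock time, e.g.:
 *
 *	lock->slock == 0x00000000	lock is free
 *	lock->slock == 0x80000005	lock held by virtual CPU 5
 */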

#ifdef CONFIG_PPC_PSERIES
#include <asm/firmware.h>

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	/*
	 * The hypervisor increments yield_count when it preempts a
	 * vCPU and again when it dispatches it, so an odd count means
	 * the vCPU is currently preempted.
	 */
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
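
/*
 * C-level sketch of the sequence above (illustration only -- the real
 * ladder must be a single lwarx/stwcx. reservation to be atomic):
 *
 *	old = lock->slock;		// lwarx: load with reservation
 *	if (old == 0)
 *		lock->slock = token;	// stwcx.: store iff still reserved
 *	return old;			// 0 => we now hold the lock
 */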

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) { }
static inline void splpar_rw_yield(arch_rwlock_t *lock) { }
#endif
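
/*
 * Sketch of what the SPLPAR implementation of splpar_spin_yield()
 * does (the real version lives in arch/powerpc/lib/locks.c; this
 * outline is from memory and hedged, not the authoritative code):
 *
 *	holder = lock->slock & 0xffff;		// paca index of holder
 *	yield_count = be32_to_cpu(lppaca_of(holder).yield_count);
 *	if ((yield_count & 1) == 0)
 *		return;				// holder is running
 *	// confer the rest of our timeslice to the holder
 *	plpar_hcall_norets(H_CONFER,
 *			   get_hard_smp_processor_id(holder), yield_count);
 */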

static inline bool is_shared_processor(void)
{
/*
 * The lppaca is only available on pseries, so guard anything
 * lppaca-related to let other platforms (which include this common
 * header) compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

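/*
 * Spin until the trylock succeeds.  While waiting, drop SMT thread
 * priority (HMT_low()) so sibling hardware threads get more of the
 * core, and on shared-processor LPARs offer the rest of our timeslice
 * to the lock holder instead of spinning uselessly.
 */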
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

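/*
 * As arch_spin_lock(), but the caller's previous interrupt state
 * (flags) is restored while we busy-wait, so interrupts are not held
 * off for the whole wait; they are disabled again before each retry
 * of the trylock.
 */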
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

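/*
 * Unlock is a release operation: PPC_RELEASE_BARRIER (lwsync, or sync
 * on CPUs without lwsync) orders everything done inside the critical
 * section before the plain store of 0 that frees the lock.
 */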
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
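
/*
 * rw->lock encoding (sketch): 0 means free, a positive value is the
 * number of readers, and a negative value means write-locked --
 * WRLOCK_TOKEN is -1 on 32-bit, or the paca lock token on 64-bit,
 * which is negative once sign-extended by __DO_SIGN_EXTEND.
 */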

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
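
/*
 * C-level sketch of the above (illustration only; the real sequence is
 * one atomic lwarx/stwcx. loop):
 *
 *	old = rw->lock;			// sign-extended on 64-bit
 *	if (old + 1 > 0)		// fails while write-locked (old < 0)
 *		rw->lock = old + 1;	// one more reader
 *	return old + 1;			// > 0 => read lock taken
 */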

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

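/*
 * Releasing a read lock must atomically decrement the reader count,
 * since other readers may take or release the lock concurrently;
 * hence the lwarx/stwcx. loop below rather than a plain store.  The
 * release barrier orders the critical section before the decrement.
 */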
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

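/*
 * Relax hooks: the generic lock code calls these while waiting for a
 * contended lock, so on shared-processor LPARs a waiter can donate its
 * timeslice to the lock holder (see spin_yield()/rw_yield() above);
 * elsewhere they are just compiler barriers.
 */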
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */