/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used because a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
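	/*
	 * The hypervisor increments yield_count each time it preempts or
	 * dispatches this vcpu, so an odd value means the vcpu is
	 * currently preempted rather than running on a physical CPU.
	 */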
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
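 *
 * This is the usual larx/stcx. sequence: lwarx loads the lock word and
 * sets a reservation, and the conditional store of our token (stwcx.)
 * only succeeds if the reservation is still intact; otherwise we retry
 * from 1:.  The acquire barrier keeps the critical section from being
 * speculated ahead of the lock acquisition.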
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
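 *
 * splpar_spin_yield() picks that CPU number back out of the lock word
 * to identify the holding virtual processor and confers the rest of
 * our time slice to it (via the H_CONFER hypervisor call).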
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#define __spin_yield(x) splpar_spin_yield(x)
#define __rw_yield(x) splpar_rw_yield(x)
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#endif

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries, so guard anything LPPACA-related
 * to allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

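/*
 * Spin until the lock is free.  The reservation-holding trylock is only
 * used for the actual acquisition attempt; while the lock is busy we
 * watch lock->slock with plain loads at low SMT priority (possibly
 * yielding to the hypervisor) rather than hammering it with stwcx.
 * attempts.
 */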
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

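/*
 * As arch_spin_lock(), but restore the caller's pre-lock interrupt
 * state ('flags') while busy-waiting, so interrupts are not kept
 * disabled for the whole time the lock is contended; they are disabled
 * again before each acquisition attempt.
 */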
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

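/*
 * Unlock is a plain store of 0; the release barrier ahead of it keeps
 * the critical section's accesses from being reordered past the store
 * that frees the lock.
 */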
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irq-safe
 * read-locks.
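 *
 * The lock word encodes the state directly: 0 means unlocked, a
 * positive value is the number of active readers, and a negative value
 * (the write token) means a writer holds the lock.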
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
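 *
 * addic. bumps the reader count loaded by lwarx; if the result is <= 0
 * the old value was negative (write-locked), so we give up without
 * attempting the store.  On 64-bit the explicit sign extension is
 * needed because the write token is a 32-bit value with the top bit
 * set.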
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

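/*
 * Dropping a read lock is an atomic decrement of the reader count (the
 * same larx/stcx. pattern), preceded by a release barrier so the
 * critical section cannot leak past the unlock.
 */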
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

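/*
 * The acquire barrier used in the lock paths above is not a full
 * barrier, so smp_mb__after_spinlock() has to supply a real smp_mb().
 */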
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */