/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

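/*
 * Illustrative sketch only, not part of this header's API: given the
 * 0x80000000 | smp_processor_id() encoding described above, the CPU
 * number of the current lock holder could be recovered from the lock
 * word roughly like this.  The helper name lock_holder_cpu_sketch() is
 * hypothetical and assumes the 0x800000yy token layout; the result is
 * only meaningful while the lock is actually held.
 */
static inline unsigned int lock_holder_cpu_sketch(arch_spinlock_t *lock)
{
	/* Clear the "held" marker bit, leaving the holder's CPU number. */
	return lock->slock & ~0x80000000u;
}
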
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

/*
 * Like arch_spin_lock(), but while busy-waiting it restores the caller's
 * saved interrupt state ("flags"), then goes back to the interrupt state
 * it had on entry before retrying the trylock.
 */
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

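/*
 * Illustrative sketch only; these helpers are hypothetical and not part
 * of this header.  The rwlock word is interpreted as a signed count: a
 * positive value means that many readers hold the lock, a negative value
 * (the write token, see WRLOCK_TOKEN below) means a writer holds it, and
 * zero means it is free.
 */
static inline int rwlock_readers_sketch(arch_rwlock_t *rw)
{
	/* Positive count of current readers; 0 if unlocked or write-locked. */
	return rw->lock > 0 ? rw->lock : 0;
}

static inline int rwlock_write_locked_sketch(arch_rwlock_t *rw)
{
	/* A negative lock word is the write token. */
	return rw->lock < 0;
}
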
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */