#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int type is used for the lock word since a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
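/*
 * Worked example (assuming the usual paca layout, where lock_token is
 * the constant 0x8000 and sits adjacent to paca_index): on a big-endian
 * 64-bit kernel, CPU 3 takes a lock by storing 0x80000003, matching the
 * 0x800000yy comment above.  On 32-bit kernels a plain 1 is stored
 * instead, so the lock word only distinguishes locked from unlocked.
 */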

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
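/*
 * The io_sync dance above: on 64-bit SMP the MMIO accessors set
 * paca->io_sync when they touch device memory.  CLEAR_IO_SYNC resets
 * that flag as a lock is taken; SYNC_IO then issues a full barrier at
 * unlock time if any MMIO was performed inside the critical section,
 * so the device accesses are ordered before the lock release is seen.
 */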

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
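/*
 * The hypervisor bumps the lppaca yield_count each time it dispatches
 * or preempts a virtual processor, so an odd value means that vCPU is
 * currently not running on a physical processor.
 */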
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
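	/*
	 * Load-reserve the lock word (with the exclusive-access hint
	 * where supported), give up if it is already non-zero, otherwise
	 * try to store our token.  Retry if the reservation was lost,
	 * and execute an acquire barrier once the store succeeds.
	 */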
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
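/*
 * Roughly, the shared-processor yield path (implemented in
 * arch/powerpc/lib/locks.c; this is only an illustrative sketch, the
 * real code has additional checks and barriers) looks like:
 *
 *	void __spin_yield(arch_spinlock_t *lock)
 *	{
 *		u32 lock_value = lock->slock;
 *		u32 holder_cpu, yield_count;
 *
 *		if (lock_value == 0)
 *			return;
 *		holder_cpu = lock_value & 0xffff;
 *		yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *		if ((yield_count & 1) == 0)
 *			return;		// holder's vcpu is running now
 *		if (lock->slock != lock_value)
 *			return;		// lock has changed hands
 *		plpar_hcall_norets(H_CONFER,
 *			get_hard_smp_processor_id(holder_cpu), yield_count);
 *	}
 */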

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
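	/*
	 * Fast path: try to grab the lock.  On failure, spin at low SMT
	 * priority while watching the lock word, yielding our timeslice
	 * to the lock holder if we are on a shared processor, and go
	 * back to medium priority before retrying the atomic.
	 */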
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
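	/*
	 * Same as arch_spin_lock(), except that while busy-waiting we
	 * drop back to the caller's saved interrupt state (flags) so
	 * interrupts are not kept disabled for the whole wait, then
	 * restore the state we entered with before retrying the trylock.
	 */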
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
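	/*
	 * Order any MMIO done under the lock (SYNC_IO), then publish the
	 * critical section with a release barrier before the plain store
	 * that clears the lock word.
	 */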
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
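/*
 * The rwlock word encoding: 0 means unlocked, a positive value is the
 * number of readers currently holding the lock, and a negative value
 * (the write token defined below) means it is write-locked.
 */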

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
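/*
 * The sequence below load-reserves the lock word, sign-extends it on
 * 64-bit so a write-held (negative) value stays negative, increments
 * it, and bails out if the result is not positive.  Otherwise it
 * store-conditionally publishes the new reader count, retrying if the
 * reservation was lost, and finishes with an acquire barrier.
 */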
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
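	/*
	 * Same reservation loop as the spinlock fast path, but on the
	 * rwlock word: we only succeed in storing the (negative) write
	 * token if the word is currently 0, i.e. no readers or writer.
	 */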
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;
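	/*
	 * Dropping a read lock must be an atomic decrement, since other
	 * readers may be updating the count concurrently; the release
	 * barrier orders the critical section before the decrement.
	 * (A write unlock, by contrast, can simply store 0 below because
	 * the writer holds the lock exclusively.)
	 */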

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */