#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used as the lock word, since a full 64-bit word is
 * not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#define arch_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif
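
/*
 * Worked example (illustrative): on PPC64, if CPU 5 holds the lock
 * the lock word reads 0x80000005, and 0 means free.  On PPC32 the
 * token is simply 1, so the lock word only distinguishes locked (1)
 * from unlocked (0).
 */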

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
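
/*
 * How this fits together (a sketch; on PPC64/SMP the flag is set by
 * the MMIO accessors in asm/io.h):
 *
 *	spin_lock(&lp);		// CLEAR_IO_SYNC: no MMIO pending
 *	writel(val, reg);	// accessor sets paca->io_sync
 *	spin_unlock(&lp);	// SYNC_IO sees io_sync, issues mb()
 *				// so the MMIO store is ordered before
 *				// the lock word is cleared
 */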

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
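
/*
 * Roughly equivalent C for the sequence above (illustrative only;
 * the lwarx/stwcx. pair is what makes the real thing atomic):
 *
 *	tmp = lock->slock;		// lwarx: load-reserve
 *	if (tmp == 0) {
 *		lock->slock = token;	// stwcx.: store-conditional,
 *					// retried from the load on failure
 *		acquire_barrier();	// PPC_ACQUIRE_BARRIER
 *	}
 *	return tmp;			// 0 => lock acquired
 */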

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
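
/*
 * __spin_yield()/__rw_yield() live in arch/powerpc/lib/locks.c.
 * Broadly (a sketch, not the exact code): they decode the holder's
 * CPU number from the lock token described above and, if that
 * virtual processor is not currently running, confer the rest of
 * our timeslice to it via a hypervisor call.
 */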

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

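/*
 * Like arch_spin_lock(), but the caller passes in the interrupt
 * state it saved before disabling interrupts: while spinning we
 * briefly restore that state (flags) so pending interrupts can be
 * serviced, then disable again (flags_dis) before retrying.
 */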
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
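
/*
 * For example (in terms of the generic lock API, for illustration):
 * a reader in an interrupt handler can take read_lock(&rw) without
 * disabling interrupts, provided every writer uses
 * write_lock_irq()/write_lock_irqsave(), so a writer can never be
 * interrupted by a reader on the same CPU while holding the lock.
 */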

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)		(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
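
/*
 * Lock word encoding, as implied by the definitions above:
 *	== 0	unlocked
 *	 > 0	number of read holders
 *	 < 0	write-locked (WRLOCK_TOKEN: 0x800000yy on PPC64,
 *		negative once sign-extended; -1 on PPC32)
 */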

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
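
/*
 * Roughly equivalent C (illustrative; atomicity comes from the
 * lwarx/stwcx. pair):
 *
 *	tmp = (int)rw->lock + 1;	// sign-extended on PPC64
 *	if (tmp > 0) {			// not write-locked
 *		rw->lock = tmp;		// one more reader, or retry
 *		acquire_barrier();
 *	}
 *	return tmp;			// > 0 => got the read lock
 */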

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

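/*
 * Dropping a read lock is an atomic decrement of the reader count,
 * preceded by a release barrier so that all accesses made under the
 * lock are visible before the count drops.
 */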
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */