xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision bbae71bf)
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

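/*
 * Atomic compare-and-swap on the 32-bit lock word, built on the
 * COMPARE AND SWAP (cs) instruction: if *lock still contains @old it is
 * replaced by @new.  Returns nonzero if the swap was performed, zero if
 * another CPU changed the lock word first.
 */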
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old == old_expected;
}

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness guarantees; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
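
/*
 * Lock word convention: 0 means unlocked; a held lock contains the
 * owning CPU's lockval (~cpu, see arch_spin_lockval() below), so the
 * out-of-line slow paths and arch_lock_relax() can identify the
 * holding CPU.
 */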

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

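/*
 * The per-cpu lock value is the one's complement of the cpu number, so
 * that cpu 0 still yields a non-zero value and stays distinguishable
 * from the unlocked state (0).
 */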
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

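/*
 * One inline locking attempt: peek at the lock word and, if it looks
 * free, try a single compare-and-swap from 0 to this cpu's lockval.
 * The barrier() only constrains the compiler; the memory ordering comes
 * from the cs instruction in _raw_compare_and_swap().
 */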
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

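/*
 * Lock/trylock: one inline fast-path attempt, then the out-of-line
 * wait/retry code.  These hooks are not called directly; the generic
 * spinlock layer wraps them.  A rough sketch of how they end up being
 * used (assuming the usual __ARCH_SPIN_LOCK_UNLOCKED initializer from
 * asm/spinlock_types.h):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);		// cs 0 -> SPINLOCK_LOCKVAL, or spin
 *	// ... critical section ...
 *	arch_spin_unlock(&lock);	// serialized store of 0
 */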
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

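/*
 * Unlock is a plain store of 0 preceded by __ASM_BARRIER, which keeps
 * the critical section's accesses from being reordered past the
 * releasing store.
 */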
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
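
/*
 * Lock word encoding: bit 31 (0x80000000) is set while a writer owns or
 * is acquiring the lock; the lower bits count the readers; 0 means
 * unlocked.  For example:
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers, no writer
 *	0x80000000	write-locked
 */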

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

/* There are no special irq-save variants; map the _flags forms to the plain ones. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

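/*
 * Inline fast paths shared by the rwlock implementations below: a
 * single compare-and-swap attempt to take a reader reference (old + 1,
 * only if no writer bit is set) or the writer bit (0 -> 0x80000000,
 * only if the lock is completely free).
 */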
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

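/*
 * With the z196 interlocked-access facility the rwlock fast paths can
 * use the load-and-{or,and,add} instructions (lao/lan/laa), which
 * atomically update the lock word and return its old value; the
 * "bcr 14,0" supplies the memory barrier on the lock/unlock boundary.
 */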
#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		"bcr	14,0\n"				\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

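/*
 * Readers optimistically add 1 to the lock word; if the old value shows
 * a writer (bit 31 set, i.e. negative), they fall back to the
 * out-of-line wait path.  Writers set bit 31 and, if the lock was not
 * free, wait out-of-line with the previous value.
 */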
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

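/*
 * Fallback for machines without the interlocked-access facility: the
 * same lock word layout, but every update goes through a cs loop.
 */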
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

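/*
 * Trylock: one inline attempt; if that fails, the out-of-line retry
 * helpers are expected to try again a limited number of times
 * (cf. spin_retry) before giving up.
 */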
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

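/*
 * Both relax variants pass rw->owner, which only ever holds a writer's
 * lockval (readers do not set it), so waiters can at most relax in
 * favour of a current writer.
 */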
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */