/*
 *  arch/s390/include/asm/spinlock.h
 *
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

extern int spin_retry;

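/*
 * Atomic compare-and-swap, implemented with the COMPARE AND SWAP (cs)
 * instruction: if *lock equals old, new is stored into *lock; in
 * either case the value *lock held before the operation is returned.
 * Callers detect success by comparing the return value with old.
 */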
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old;
}

/*
 * Simple spin lock operations.  There are two variants, one that
 * disables IRQs on the local processor and one that does not.
 *
 * We make no fairness guarantees; fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

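/*
 * arch_spin_unlock_wait() spins until the lock is observed free; it
 * does not acquire the lock.  While waiting, arch_spin_relax() may
 * yield the CPU to the hypervisor when running virtualized (see
 * arch/s390/lib/spinlock.c).
 */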
extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

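/*
 * The lock word holds the bitwise complement of the owning CPU number
 * (~smp_processor_id()), which is non-zero even for CPU 0; a value of
 * 0 means the lock is free.  The fast path acquires the lock with a
 * single compare-and-swap; on contention we fall back to the wait
 * routines above.
 */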
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

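/*
 * The _flags variant may briefly restore the caller's saved interrupt
 * state while spinning and disables interrupts again before each
 * acquisition attempt (see arch_spin_lock_wait_flags()).
 */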
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

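/*
 * Unlock by swapping the current owner value back to 0.  Using cs
 * rather than a plain store also provides the serialization required
 * when releasing the lock.
 */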
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

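/*
 * Lock word encoding: bit 31 (0x80000000) set means write-locked; the
 * lower 31 bits count the readers.  A value of 0 means the lock is
 * free.
 */
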
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

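/*
 * Reader fast path: sample the lock word with the write bit masked
 * off and try to bump the reader count with one compare-and-swap.  If
 * a writer holds the lock, or the sample raced with another update,
 * the cs fails and we take the slow path.
 */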
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

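/*
 * Drop a reader reference: retry the compare-and-swap until the
 * decrement goes through, since other readers may change the count
 * concurrently.
 */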
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

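/*
 * Writer fast path: the lock must be completely free (no readers and
 * no writer), so try to go from 0 to 0x80000000 with a single
 * compare-and-swap; on contention, fall back to the slow path.
 */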
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

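/*
 * The *_trylock_retry() helpers (arch/s390/lib/spinlock.c) retry the
 * compare-and-swap a bounded number of times, governed by spin_retry,
 * before reporting failure.
 */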
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */