xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision e5931943)
/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old;
}

#else /* __GNUC__ */

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,0(%4)"
		: "=d" (old), "=m" (*lock)
		: "0" (old), "d" (new), "a" (lock), "m" (*lock)
		: "cc", "memory");
	return old;
}

#endif /* __GNUC__ */
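
/*
 * Illustrative sketch (not part of the original header): both variants
 * above implement the COMPARE AND SWAP contract - the previous value of
 * *lock is returned, so the exchange succeeded if and only if the return
 * value equals "old".  A hypothetical caller would test for success like
 * this:
 */
static inline int example_cas_succeeded(volatile unsigned int *lock,
					unsigned int old, unsigned int new)
{
	/* CS stored "new" only if *lock still contained "old". */
	return _raw_compare_and_swap(lock, old, new) == old;
}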

/*
 * Simple spin lock operations.  There are two variants, one disables
 * IRQs on the local processor, the other does not.
 *
 * We make no fairness guarantees; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
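
/*
 * The wait/retry/relax slow paths declared above live out of line in
 * arch/s390/lib/spinlock.c; the inline fast paths below try a single
 * compare-and-swap first and fall back to them only on contention.
 */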

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}
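
/*
 * Note on the owner tag (inferred from the code above): the lock word
 * holds the bitwise complement of the owning CPU number, so that CPU 0 -
 * whose id would otherwise collide with the "unlocked" value 0 - stores
 * 0xffffffff.  CPU 2, for example, stores ~2 = 0xfffffffd.
 */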

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					 unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	/* Swap the current owner tag back to 0 (unlocked). */
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
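
/*
 * Usage sketch (hypothetical, for illustration only): the generic
 * spinlock layer drives these entry points roughly as follows to
 * protect a critical section:
 */
static inline void example_critical_section(arch_spinlock_t *lp)
{
	arch_spin_lock(lp);	/* spin until we own the lock */
	/* ... critical section ... */
	arch_spin_unlock(lp);	/* release the lock */
}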

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
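
/*
 * Lock word layout (as used by the operations below): bit 31 is the
 * writer flag and bits 0-30 hold the reader count.  0 means unlocked,
 * 0x80000000 means write-locked, and any value with bit 31 clear is
 * the number of active readers.
 */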

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	/* Assume no writer: clear bit 31, then try to bump the reader count. */
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}
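
/*
 * Illustrative trace of the unlock loop above: if another CPU changes
 * rw->lock between the load and the CS, the CS fails and returns the
 * current value, which becomes the new "old" for the next attempt.
 * The loop thus terminates exactly when one decrement succeeds.
 */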

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
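
/*
 * Trylock usage sketch (hypothetical, for illustration only): a caller
 * that must not spin can attempt the write lock once and fall back to
 * other work on failure:
 */
static inline int example_write_poll(arch_rwlock_t *rw)
{
	if (!arch_write_trylock(rw))
		return 0;	/* contended: caller retries later */
	/* ... exclusive access ... */
	arch_write_unlock(rw);
	return 1;
}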

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */