/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	/*
	 * cs compares "old" with *lock and, if they are equal, stores
	 * "new"; if not, the current lock value is loaded into "old".
	 */
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}

#else /* __GNUC__ */

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	/* Older gcc cannot use the "Q" constraint; address the lock via "a". */
	asm volatile(
		"	cs	%0,%3,0(%4)"
		: "=d" (old), "=m" (*lock)
		: "0" (old), "d" (new), "a" (lock), "m" (*lock)
		: "cc", "memory" );
	return old;
}

#endif /* __GNUC__ */
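
/*
 * Illustrative sketch, not part of the arch API: _raw_compare_and_swap()
 * returns the previous contents of the lock word, so the exchange
 * succeeded iff the return value equals the expected "old" value. The
 * hypothetical helper below shows the canonical try-acquire pattern.
 */
static inline int __example_cas_try_acquire(volatile unsigned int *lock,
					    unsigned int tag)
{
	/* Succeeds only if the word was 0; installs the nonzero tag. */
	return _raw_compare_and_swap(lock, 0, tag) == 0;
}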

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions (fairness has a cost).
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	/* ~smp_processor_id() is used so the owner tag is nonzero even on cpu 0. */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	/* cs always succeeds here (old == current owner) and acts as the release barrier. */
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
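
/*
 * Usage sketch (hypothetical caller, assuming the __ARCH_SPIN_LOCK_UNLOCKED
 * initializer from asm/spinlock_types.h):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section, owner_cpu holds ~smp_processor_id() ...
 *	arch_spin_unlock(&lock);
 */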

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irq-safe
 * read-locks.
 */
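
/*
 * The lock word encodes both roles: bit 31 is the writer bit and the
 * lower 31 bits count the readers:
 *
 *	0x00000000	lock is free
 *	0x0000000n	n readers hold the lock
 *	0x80000000	a writer holds the lock
 */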

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	/* Mask out the writer bit; the swap fails if a writer holds the lock. */
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	/* Decrement the reader count, retrying until the cs succeeds. */
	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}
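
/*
 * Illustrative sketch, not part of the arch API: the loop above is the
 * generic compare-and-swap update pattern - reread the current value
 * whenever the exchange fails and retry until it succeeds. The same
 * shape works for any read-modify-write, e.g. a hypothetical add:
 */
static inline unsigned int __example_cas_add(volatile unsigned int *word,
					     unsigned int val)
{
	unsigned int old, cmp;

	old = *word;
	do {
		cmp = old;
		old = _raw_compare_and_swap(word, old, old + val);
	} while (cmp != old);
	return old + val;	/* the value just stored */
}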

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* The writer bit (0x80000000) can only be set while the lock is free. */
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
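
/*
 * Usage sketch (hypothetical caller): the trylock variants return
 * nonzero on success and fail instead of spinning indefinitely, so
 * they suit contexts that must not wait:
 *
 *	if (arch_write_trylock(&rw)) {
 *		... exclusive access ...
 *		arch_write_unlock(&rw);
 *	}
 */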

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */