xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision 92a76f6d)
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

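/*
 * The value the running cpu writes into a lock word it holds; it is
 * kept in the lowcore and derived from the cpu number (see
 * arch_spin_lockval() below), so it is nonzero and unique per cpu and
 * 0 can always mean "unlocked".
 */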
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

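/*
 * Wrapper around the compiler's atomic compare-and-swap builtin:
 * if *lock equals old it is replaced with new and true is returned,
 * otherwise the lock word is left unchanged and false is returned.
 */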
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

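/*
 * arch_lock_relax() is implemented out of line; given the lock value of
 * the owning cpu it may, when running under a hypervisor, hint that the
 * owner should be scheduled instead of spinning here.
 */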
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

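/*
 * The lock value for a cpu is the bitwise complement of its number so
 * that cpu 0 still gets a nonzero value; 0 always means "unlocked".
 */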
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

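/*
 * Single compare-and-swap attempt: only try the CAS if the lock word
 * looks free.  barrier() is a compiler barrier that forces the lock
 * word to be re-read here.
 */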
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

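/*
 * Lock acquisition: one inline CAS attempt as the fast path, with the
 * out-of-line wait routines handling contention (the _flags variant may
 * re-enable interrupts from the saved flags while it spins).
 */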
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

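/*
 * Unlock is a plain store of 0 to the lock word; the "memory" clobber
 * keeps the compiler from moving accesses to the protected data past
 * the release store.
 */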
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
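
/*
 * Lock word layout: bit 31 (0x80000000) is the write-lock bit, the
 * remaining bits count the readers, so a negative value means a writer
 * holds or is acquiring the lock.
 */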

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

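/*
 * Single-shot rwlock attempts: a reader bumps the count as long as no
 * writer holds the lock (sign bit clear), a writer grabs the whole word
 * only if it is currently zero (no readers, no writer).
 */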
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

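/*
 * With the z196 interlocked-access facility the rwlock fast path uses
 * load-and-{or,and,add} (lao/lan/laa) to update the lock word and get
 * the old value back in a single instruction.  __RAW_LOCK adds a
 * "bcr 14,0" serialization as the memory barrier on the acquire side;
 * the release side relies on the interlocked update alone.
 */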
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

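/*
 * Reader fast path: add 1 to the lock word; if the old value was
 * negative a writer is involved and the slow path is taken.
 */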
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

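/*
 * Writer fast path: atomically set the write bit; if the lock was not
 * free, pass the previous value on to the out-of-line slow path.  The
 * owner field is what the relax helpers below use to find the cpu to
 * yield to.
 */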
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

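/*
 * Without the interlocked-access instructions the reader count is
 * dropped with a compare-and-swap loop.
 */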
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */