xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision 82003e04)
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

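/*
 * Per-cpu lock value: the bitwise complement of the cpu number, cached
 * in the lowcore so that a cpu can stamp its identity into a lock word
 * without recomputing it (see arch_spin_lockval() below).
 */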
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

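/*
 * Number of inline retries before falling back to the yielding slow
 * path; tunable at boot with the "spin_retry=" command-line parameter.
 */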
extern int spin_retry;

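/*
 * Atomically replace *lock with "new" if it still contains "old";
 * returns nonzero on success.  The GCC builtin compiles to a
 * COMPARE AND SWAP (cs) instruction, which also serializes, so this
 * primitive doubles as a full memory barrier.
 */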
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions, as fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

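/*
 * Out-of-line slow paths, implemented in arch/s390/lib/spinlock.c.
 * arch_lock_relax() yields the cpu to the lock holder when running
 * virtualized; the *_wait variants spin until the lock can be taken.
 */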
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

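/*
 * One optimistic attempt: if the lock word reads as free, try to stamp
 * our per-cpu lockval into it with a single compare-and-swap.
 */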
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

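/*
 * Unlocking is a plain store of zero.  s390's strongly ordered stores
 * make this sufficient as a release; the "memory" clobber only keeps
 * the compiler from reordering accesses around it.
 */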
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

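/*
 * Wait until the lock is seen unlocked, without taking it.  The
 * acquire barrier afterwards orders the caller against the observed
 * unlock.
 */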
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

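/*
 * Layout of the rwlock word: bits 0-30 count the readers, bit 31
 * (0x80000000, the sign bit) marks a writer.  A zero word is a free
 * lock, a negative one (as a signed int) is write-locked.
 */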
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

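/*
 * Reader fast path attempt: snapshot the word, and if no writer holds
 * the lock (sign bit clear) try to bump the reader count by one.
 */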
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

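/*
 * Writer fast path attempt: only a completely free lock (no readers,
 * no writer) can be taken, by setting the sign bit in one go.
 */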
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

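/*
 * On z196 and newer machines the interlocked-access facility provides
 * load-and-add/and/or instructions (laa, lan, lao) that combine the
 * fetch and the update atomically, giving cheaper rwlock fast paths.
 */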
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

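/*
 * Interlocked update that returns the old value.  The "bcr 14,0"
 * following it is a fast serialization; the load-and-* instructions
 * are atomic but do not serialize on their own, so the lock side
 * needs this explicit acquire barrier.
 */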
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

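/*
 * Same interlocked update without the barrier: on the unlock side the
 * strongly ordered stores already provide release semantics.
 */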
#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

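/*
 * Reader fast path: unconditionally add one to the reader count.  If
 * the old value had the writer bit set, the slow path in
 * arch/s390/lib/spinlock.c takes over, backing the speculative
 * increment out again before waiting.
 */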
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

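/*
 * Writer fast path: atomically set the writer bit.  Any other bits in
 * the old value mean readers or another writer are present and we must
 * wait.  The owner field records our lockval for arch_write_relax().
 */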
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

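/*
 * Pre-z196 fallback: the same locking protocol built from
 * compare-and-swap retry loops.
 */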
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

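/*
 * Trylock wrappers common to both variants: one inline attempt, then a
 * bounded retry loop out of line.
 */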
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

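/*
 * Back off under contention: rw->owner holds the lockval of the
 * current writer (0 while only readers hold the lock), which lets
 * arch_lock_relax() yield to that cpu when running virtualized.
 */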
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */