/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

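/*
 * _raw_compare_and_swap() wraps the COMPARE AND SWAP (CS) instruction:
 * if *lock still contains 'old', it is atomically replaced with 'new'
 * and the function returns 1; otherwise *lock is left unchanged and 0
 * is returned.  For example, _raw_compare_and_swap(&lp->lock, 0,
 * SPINLOCK_LOCKVAL) acquires a free spinlock by storing the caller's
 * per-cpu lock value, as done in arch_spin_trylock_once() below.
 */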
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old == old_expected;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
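/*
 * The lock word is 0 when the lock is free; while held it contains the
 * owner's per-cpu lock value (SPINLOCK_LOCKVAL, see arch_spin_lockval()
 * below).  The helpers declared next implement the slow paths out of
 * line: arch_spin_lock_wait() spins and retries the compare and swap,
 * arch_spin_trylock_retry() retries it a bounded number of times (see
 * spin_retry), and arch_lock_relax() gives the cpu that owns the lock a
 * chance to run, e.g. by yielding when running under a hypervisor.
 */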

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

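/*
 * The per-cpu lock value is the one's complement of the cpu number, so
 * it is non-zero for every cpu (including cpu 0) and can never be
 * mistaken for the "unlocked" value 0.
 */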
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

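/*
 * The lock/trylock operations below try the inline compare-and-swap
 * fast path first and only fall back to the out-of-line wait/retry
 * helpers when the lock is contended.
 */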
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

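/*
 * Unlocking only needs a store of 0 preceded by a memory barrier
 * (__ASM_BARRIER expands to a serializing instruction): aligned word
 * stores are atomic, and no other cpu can own the lock at this point.
 */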
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
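/*
 * rwlock word layout: the most significant bit (0x80000000) is set
 * while a writer holds (or is acquiring) the lock, the lower bits count
 * the readers.  A non-negative value therefore means "no writer", which
 * is what arch_read_can_lock() tests.  rw->owner records the write
 * owner's SPINLOCK_LOCKVAL so that arch_read_relax()/arch_write_relax()
 * can yield to it.
 */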

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

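/*
 * Two implementations follow: machines with the z196 interlocked-access
 * facility update the lock word with single load-and-add/or/and
 * instructions, older machines fall back to compare-and-swap loops.
 */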
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		"bcr	14,0\n"				\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

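/*
 * __RAW_LOCK/__RAW_UNLOCK atomically apply the given load-and-or/and/add
 * instruction to the lock word and return its previous value.  The
 * "bcr 14,0" acts as a memory barrier (fast-BCR-serialization): it sits
 * after the update when acquiring and before the update when releasing,
 * which gives the required acquire/release ordering.
 */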
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

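/*
 * Without the interlocked-access instructions the reader count is
 * dropped with a compare-and-swap retry loop: reread the lock word
 * until the decrement goes through.
 */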
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

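/*
 * The trylock variants try the inline fast path once and then let
 * _raw_read_trylock_retry()/_raw_write_trylock_retry() retry a bounded
 * number of times (see spin_retry) before giving up.
 */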
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */