xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; they have a cost.
 *
 * (The type definitions are in asm/spinlock_types.h; an illustrative
 * usage sketch follows arch_spin_unlock() below.)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

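/*
 * The per-CPU lock value is the bitwise complement of the CPU number
 * (see arch_spin_lockval() below): CPU 0 uses 0xffffffff, CPU 1 uses
 * 0xfffffffe, and so on.  A lock word of 0 therefore always means
 * "unlocked" and can never collide with a valid owner value.
 */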
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7: access-intent hint for the store below */
#endif
		"	st	%1,%0\n"	/* storing 0 releases the lock */
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}
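
/*
 * Illustrative sketch only (not part of the original header): how the
 * lock/unlock primitives above are intended to be paired.  Kernel code
 * normally reaches them through the generic spin_lock()/spin_unlock()
 * wrappers; the function name and the counter argument below are
 * hypothetical and exist purely for demonstration.
 */
static inline void __sketch_arch_spin_usage(arch_spinlock_t *lp, int *counter)
{
	arch_spin_lock(lp);	/* fast-path cmpxchg, slow path in arch_spin_lock_wait() */
	(*counter)++;		/* critical section */
	arch_spin_unlock(lp);	/* storing 0 releases the lock */
}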

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE: It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.  (An illustrative sketch of this pairing
 * appears at the end of this file.)
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)
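
/*
 * The rwlock word encodes both reader and writer state: readers
 * increment the low bits (so a positive value is the number of active
 * readers), while a writer sets the sign bit 0x80000000.  Hence
 * "lock >= 0" means no writer is present and "lock == 0" means the
 * lock is completely free.
 */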

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

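/*
 * lao/lan/laa are the z196 interlocked-access facility instructions
 * (LOAD AND OR/AND/ADD): they atomically combine the operand with the
 * memory word and return the old value.  The "bcr 14,0" in __RAW_LOCK
 * is a fast-BCR serialization, providing a full memory barrier once
 * the lock value has been updated.
 */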
#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)		/* a writer holds the lock */
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)		/* readers or another writer are active */
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);	/* clear the writer bit */
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

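/*
 * Pre-z196 fallback: without the interlocked-access instructions the
 * rwlock word is updated with compare-and-swap loops, here via
 * __atomic_cmpxchg_bool() and the out-of-line wait helpers.
 */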
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

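/*
 * Illustrative sketch only (not part of the original header): the
 * reader/writer pairing described in the rwlock comment above.  Real
 * code uses the generic read_lock()/write_lock_irqsave() wrappers;
 * the function names and the shared variable below are hypothetical.
 */
static inline int __sketch_rwlock_reader(arch_rwlock_t *rw, const int *shared)
{
	int val;

	arch_read_lock(rw);	/* readers may run in interrupt context */
	val = *shared;
	arch_read_unlock(rw);
	return val;
}

static inline void __sketch_rwlock_writer(arch_rwlock_t *rw, int *shared, int val)
{
	/* a real writer would take the lock with interrupts disabled */
	arch_write_lock(rw);
	*shared = val;
	arch_write_unlock(rw);
}
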
#endif /* __ASM_SPINLOCK_H */