/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

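/*
 * SPINLOCK_LOCKVAL is this CPU's non-zero lock value, cached in the
 * lowcore. A held lock stores the owner's lock value, so waiters can
 * identify and, if need be, yield to the owning CPU.
 */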
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

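/*
 * Number of retries in the out-of-line lock/trylock loops; tunable
 * with the "spin_retry=" kernel parameter.
 */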
extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

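/*
 * arch_vcpu_is_preempted() reports whether the virtual CPU backing
 * "cpu" has been preempted by the hypervisor, so that waiters do not
 * spin on a lock whose holder cannot currently run.
 */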
#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

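/*
 * The wait and retry slow paths above live in arch/s390/lib/spinlock.c.
 * arch_spin_relax() passes the owner's lock value to arch_lock_relax(),
 * which may yield the current virtual CPU to a preempted lock owner.
 */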
static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}
#define arch_spin_relax		arch_spin_relax

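/*
 * The one's complement of the CPU number is used as lock value so that
 * every CPU, including CPU 0, has a non-zero value; 0 means "unlocked".
 */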
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

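/*
 * Single inline compare-and-swap attempt: if the lock looks free, try
 * to install this CPU's lock value. barrier() keeps the compiler from
 * reusing a stale copy of the lock word.
 */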
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

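/* Inline fastpath; on contention fall through to the out-of-line code. */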
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}
#define arch_spin_lock_flags	arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

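/*
 * Unlock is a plain store of 0. On MARCH_ZEC12 and newer it is preceded
 * by NIAI 7 ("next instruction access intent", emitted as a raw opcode
 * for the sake of older assemblers), which hints that this CPU is done
 * with the cache line and speeds up handover to a waiting CPU.
 */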
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 */

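/*
 * Lock word layout: bit 31 (0x80000000) is the writer bit, the lower
 * bits count the active readers; 0 means unlocked, a negative value
 * means a writer is active.
 */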
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

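/*
 * One compare-and-swap attempt to take a reader reference; fails if a
 * writer is active (lock word negative) or the word changed between
 * the read and the cmpxchg.
 */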
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

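/*
 * One compare-and-swap attempt to set the writer bit; succeeds only if
 * the lock word is 0, i.e. no readers and no writer.
 */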
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

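/*
 * With the z196 interlocked-access facility the lock word can be
 * updated with single load-and-or/and/add instructions (lao/lan/laa)
 * instead of compare-and-swap loops.
 */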
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n" /* fast-BCR serialization */ \
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

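/*
 * __RAW_UNLOCK performs the same interlocked update but without the
 * serializing bcr, which is needed only on the acquire side.
 */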
#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

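/*
 * Take a reader reference with a single "laa"; a negative old value
 * means a writer is active, so retry in the out-of-line wait path.
 */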
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

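/*
 * Set the writer bit with "lao"; a non-zero old value means readers or
 * another writer are still active and must be waited for. The owner's
 * lock value is recorded for arch_write_relax().
 */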
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

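/*
 * Without the interlocked-access facility the lock word is updated
 * with compare-and-swap loops instead.
 */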
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

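/*
 * While waiting for a rwlock, yield to the recorded owner if its
 * virtual CPU has been preempted, instead of spinning uselessly.
 */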
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}
#define arch_read_relax		arch_read_relax

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}
#define arch_write_relax	arch_write_relax

#endif /* __ASM_SPINLOCK_H */