xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision 2684e73a)
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

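/*
 * Atomic compare-and-swap built on the CS instruction: if *lock still
 * contains @old, replace it with @new. Returns nonzero if the swap was
 * performed, zero if another CPU changed the lock word first.
 */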
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old == old_expected;
}

/*
 * Simple spin lock operations.  There are two variants: one disables
 * interrupts on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness guarantees have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
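
/*
 * Usage sketch (illustrative only; my_lock is just an example name): these
 * arch_* primitives are normally reached through the generic
 * spin_lock()/spin_unlock() wrappers from <linux/spinlock.h> rather than
 * called directly. The basic pattern is:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	// critical section
 *	spin_unlock(&my_lock);
 *
 * The IRQ-disabling variant, which typically ends up in
 * arch_spin_lock_flags():
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	// critical section, interrupts off on this CPU
 *	spin_unlock_irqrestore(&my_lock, flags);
 */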

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

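/*
 * The lock value of a CPU is the bitwise complement of its number, so the
 * value is never 0 for a valid CPU and 0 can always mean "unlocked".
 */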
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

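/*
 * Unlock is a plain store of zero; the __ASM_BARRIER emitted ahead of the
 * store acts as the release barrier, keeping the critical section's memory
 * accesses from being reordered past the store that frees the lock.
 */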
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irq-safe
 * read-locks.
 */
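
/*
 * Usage sketch (illustrative only; my_rwlock is a made-up name, and the
 * generic rwlock wrappers from <linux/rwlock.h> are what callers normally
 * use). A writer that must also be safe against readers running in
 * interrupt context takes the irq-safe variant, while those readers can
 * stay non-irq-safe, as described above:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	// writer (process context)
 *	unsigned long flags;
 *	write_lock_irqsave(&my_rwlock, flags);
 *	// update the data
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	// reader (may run in interrupt context)
 *	read_lock(&my_rwlock);
 *	// look at the data
 *	read_unlock(&my_rwlock);
 */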

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

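/*
 * Lock word layout: bit 31 set (0x80000000) means a writer holds the lock,
 * the lower bits count the readers. A negative value therefore means
 * "write-locked" and blocks new readers.
 */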
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

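/*
 * Writers record their lock value in ->owner; arch_read_relax() and
 * arch_write_relax() pass it on to arch_lock_relax().
 */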
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */