xref: /openbmc/linux/arch/s390/lib/spinlock.c (revision 8c0b9ee8)
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

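/*
 * Number of busy-wait iterations before giving up and yielding the CPU.
 * -1 means "pick the default in spin_retry_init() below"; the value can
 * be overridden with the "spin_retry=" kernel command line parameter.
 */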
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * Handle the "spin_retry=" kernel command line parameter,
 * e.g. spin_retry=2000.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

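/*
 * Compare-and-delay (CAD): hint the CPU to stall briefly while *lock
 * still contains the value 'old'. Encoded with .insn, which keeps the
 * file assembling even with a binutils that does not know the cad
 * mnemonic.
 */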
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

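/*
 * Slow path of arch_spin_lock(). The lock word holds the owner's
 * SPINLOCK_LOCKVAL, i.e. the owning CPU's number in complemented form,
 * so ~owner below recovers the CPU to query or to yield to via the
 * hypervisor directed-yield helper smp_yield_cpu().
 */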
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

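/*
 * Same as arch_spin_lock_wait(), but spin with interrupts enabled:
 * 'flags' is restored while busy-waiting, and interrupts are only
 * disabled again around the compare-and-swap that actually takes
 * the lock.
 */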
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

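/*
 * Bounded trylock: make up to spin_retry attempts without yielding.
 * Returns 1 if the lock was acquired, 0 otherwise.
 */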
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

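/*
 * rwlock layout: bit 31 of rw->lock is the write bit, the lower 31 bits
 * count the readers. rw->owner carries the writer's lock value (set by
 * the arch_write_lock() paths) so that waiters can direct their yield
 * at the right CPU.
 */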
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
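	/*
	 * The interlocked-access fastpath in arch_read_lock() has already
	 * counted us as a reader; back that out before waiting for the
	 * writer to go away.
	 */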
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

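/*
 * Bounded read-trylock: at most spin_retry attempts, no yielding.
 */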
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

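/*
 * z196+ variant: 'prev' holds the lock value returned by the caller's
 * interlocked OR of the write bit. Keep retrying the OR until the write
 * bit is ours ((int) prev >= 0) and all readers have drained
 * ((old & 0x7fffffff) == 0).
 */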
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_rmb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

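/*
 * Pre-z196 variant: set the write bit with compare-and-swap instead of
 * an interlocked OR. 'prev' starts out negative and only turns
 * non-negative once we have set the write bit ourselves; the loop exits
 * when that has happened and no readers remain.
 */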
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_rmb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

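/*
 * Bounded write-trylock: succeeds only if the lock word is completely
 * free (no readers, no writer).
 */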
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

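/*
 * Out-of-line body of the arch_*_relax() helpers: 'cpu' is the lock
 * value of the current owner (0 if unknown). On LPAR a running owner
 * needs no help; otherwise ask the hypervisor to schedule the owning
 * CPU.
 */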
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);