xref: /openbmc/linux/arch/s390/lib/spinlock.c (revision efe4a1ac)
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * Parse the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

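/*
 * For example, booting with "spin_retry=2000" doubles the default of
 * 1000 lock-value polls that spin_retry_init() installs for the
 * slow-path loops below.
 */

/*
 * Out-of-line slow path of arch_spin_lock(), entered when the inline
 * fast path in asm/spinlock.h could not take the free lock. The lock
 * word holds the owner's SPINLOCK_LOCKVAL; its bitwise complement is
 * the owning CPU number handed to arch_vcpu_is_preempted() and
 * smp_yield_cpu() below.
 */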
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM running
		 * in an LPAR, yield the CPU unconditionally. On a bare
		 * LPAR rely on the sense running status instead.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

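/*
 * Same as arch_spin_lock_wait(), used for the spin_lock_irqsave() case:
 * the saved interrupt state in @flags is restored while the lock is
 * busy and interrupts are disabled again right before the
 * compare-and-swap that takes the lock.
 */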
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM running
		 * in an LPAR, yield the CPU unconditionally. On a bare
		 * LPAR rely on the sense running status instead.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

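/*
 * Out-of-line part of arch_spin_trylock(): poll the lock word up to
 * spin_retry times and grab the lock with compare-and-swap as soon as
 * it reads free. Returns 1 on success, 0 if the lock stayed contended.
 */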
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

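/*
 * Slow path for read_lock: a negative lock word means a writer holds or
 * is taking the lock, so spin until it turns non-negative and then add
 * a reader reference with compare-and-swap. With the z196 interlocked-
 * access facility the interlocked add of -1 below backs out the reader
 * reference the inline fast path had already added.
 */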
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

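/*
 * Out-of-line part of read_trylock(): retry up to spin_retry times to
 * add a reader reference while no writer (negative lock word) is
 * present. Returns 1 on success, 0 otherwise.
 */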
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

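/*
 * Slow path for write_lock with the interlocked-access facility. @prev
 * is the previous lock value returned by the fast path's attempt to set
 * the write bit. Keep re-setting the write bit 0x80000000 and spin until
 * it was set on a word without another writer (prev >= 0) and all reader
 * references have drained (old & 0x7fffffff == 0).
 */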
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

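/*
 * Slow path for write_lock without the interlocked-access facility:
 * set the write bit 0x80000000 with compare-and-swap and spin until the
 * write bit could be set on a word without another writer and all
 * reader references are gone.
 */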
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

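/*
 * Out-of-line part of write_trylock(): retry up to spin_retry times to
 * move the lock word from 0 (unlocked, no readers) straight to the
 * write bit 0x80000000. Returns 1 on success, 0 otherwise.
 */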
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

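/*
 * Relax while spinning on a lock: @cpu is the lock value of the current
 * owner (0 if the lock is free), i.e. the bitwise complement of the
 * owning CPU number. Yield to that CPU unless we run on a bare LPAR and
 * the owner is currently running.
 */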
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);