xref: /openbmc/linux/arch/s390/lib/spinlock.c (revision 8684014d)
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

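/* Number of lock polling iterations before a waiter considers yielding. */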
int spin_retry = 1000;

/*
 * Parse the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

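/*
 * Out of line slow path of arch_spin_lock(), entered once the inline
 * compare-and-swap on the free lock word has failed. The lock word holds
 * the lockval of the owning CPU (~owner is its CPU number); the waiter
 * yields to that CPU whenever its virtual CPU is not scheduled by the
 * hypervisor.
 */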
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

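/*
 * Slow path of arch_spin_lock_flags(): same strategy as
 * arch_spin_lock_wait(), except that interrupts are re-enabled from the
 * caller's saved flags while waiting and disabled again right before
 * each attempt to grab the freed lock.
 */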
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

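/*
 * Retry the inline trylock fast path up to spin_retry times.
 * Returns 1 once the lock is taken, 0 if every attempt failed.
 */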
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

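/*
 * Slow path for read_lock. The most significant bit of the lock word is
 * the writer bit, the remaining bits count the readers. With the
 * interlocked-access facility the fast path has already added this
 * reader optimistically, so that increment is backed out first. Then
 * spin until the reader count can be raised while no writer holds the
 * lock.
 */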
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

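/*
 * Retry taking the read lock up to spin_retry times.
 * Returns 1 on success, 0 if the lock could not be acquired.
 */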
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

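/*
 * Slow path for write_lock with the interlocked-access facility. The
 * fast path has already set the writer bit with a load-and-or and
 * passes the previous lock value in @prev; spin until all readers are
 * gone and the writer bit was ours, re-setting it if the previous
 * owner's unlock cleared it.
 */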
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_rmb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

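/*
 * Slow path for write_lock without the interlocked-access facility:
 * claim the writer bit with compare-and-swap, then keep spinning until
 * the reader count has dropped to zero.
 */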
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_rmb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

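/*
 * Retry taking the write lock up to spin_retry times.
 * Returns 1 on success, 0 if the lock stayed contended.
 */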
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

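/*
 * Relax while spinning on a contended lock. @cpu is the lockval of the
 * current owner (0 means the lock is free). Yield to the owning CPU
 * unless we run under LPAR and its virtual CPU is currently scheduled.
 */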
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);