xref: /openbmc/linux/arch/s390/lib/spinlock.c (revision 470ada6b)
1951f22d5SMartin Schwidefsky /*
2951f22d5SMartin Schwidefsky  *    Out of line spinlock code.
3951f22d5SMartin Schwidefsky  *
4a53c8fabSHeiko Carstens  *    Copyright IBM Corp. 2004, 2006
5951f22d5SMartin Schwidefsky  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6951f22d5SMartin Schwidefsky  */
7951f22d5SMartin Schwidefsky 
8951f22d5SMartin Schwidefsky #include <linux/types.h>
9951f22d5SMartin Schwidefsky #include <linux/module.h>
10951f22d5SMartin Schwidefsky #include <linux/spinlock.h>
11951f22d5SMartin Schwidefsky #include <linux/init.h>
128b646bd7SMartin Schwidefsky #include <linux/smp.h>
13951f22d5SMartin Schwidefsky #include <asm/io.h>
14951f22d5SMartin Schwidefsky 
15951f22d5SMartin Schwidefsky int spin_retry = 1000;
16951f22d5SMartin Schwidefsky 
17951f22d5SMartin Schwidefsky /**
18951f22d5SMartin Schwidefsky  * spin_retry= parameter: number of busy-loop retries a lock slow path
 * performs before yielding the cpu (default 1000).
19951f22d5SMartin Schwidefsky  */
/*
 * Parse the "spin_retry=" kernel command line option, overriding the
 * default retry count used by the lock slow paths below.  Returns 1 to
 * mark the option as consumed.
 */
20951f22d5SMartin Schwidefsky static int __init spin_retry_setup(char *str)
21951f22d5SMartin Schwidefsky {
	/* base 0: accepts decimal, 0x-prefixed hex and 0-prefixed octal */
22951f22d5SMartin Schwidefsky 	spin_retry = simple_strtoul(str, &str, 0);
23951f22d5SMartin Schwidefsky 	return 1;
24951f22d5SMartin Schwidefsky }
25951f22d5SMartin Schwidefsky __setup("spin_retry=", spin_retry_setup);
26951f22d5SMartin Schwidefsky 
/*
 * arch_spin_lock_wait - out-of-line spinlock acquire slow path.
 * @lp: lock the inline fast path failed to grab.
 *
 * The lock word is 0 when free and holds the owner's lock value
 * (SPINLOCK_LOCKVAL of the owning cpu) when taken; the value is
 * complemented (~owner) before being handed to the smp_* helpers,
 * i.e. the cpu address of the owner is the complement of the lock
 * word.  Spins until the lock is acquired; never returns failure.
 */
270199c4e6SThomas Gleixner void arch_spin_lock_wait(arch_spinlock_t *lp)
28951f22d5SMartin Schwidefsky {
296c8cd5bbSPhilipp Hachtmann 	unsigned int cpu = SPINLOCK_LOCKVAL;
3059b69787SGerald Schaefer 	unsigned int owner;
312e4006b3SGerald Schaefer 	int count;
32951f22d5SMartin Schwidefsky 
33951f22d5SMartin Schwidefsky 	while (1) {
34470ada6bSMartin Schwidefsky 		owner = ACCESS_ONCE(lp->lock);
35470ada6bSMartin Schwidefsky 		/* Try to get the lock if it is free. */
36470ada6bSMartin Schwidefsky 		if (!owner) {
375b3f683eSPhilipp Hachtmann 			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
3859b69787SGerald Schaefer 				return;
			/* Lost the cmpxchg race: re-read the owner. */
3959b69787SGerald Schaefer 			continue;
4059b69787SGerald Schaefer 		}
41470ada6bSMartin Schwidefsky 		/* Check if the lock owner is running. */
42470ada6bSMartin Schwidefsky 		if (!smp_vcpu_scheduled(~owner)) {
			/* Owner vcpu is preempted: directed yield to it. */
438b646bd7SMartin Schwidefsky 			smp_yield_cpu(~owner);
44470ada6bSMartin Schwidefsky 			continue;
45470ada6bSMartin Schwidefsky 		}
46470ada6bSMartin Schwidefsky 		/* Loop for a while on the lock value. */
47470ada6bSMartin Schwidefsky 		count = spin_retry;
48470ada6bSMartin Schwidefsky 		do {
49470ada6bSMartin Schwidefsky 			owner = ACCESS_ONCE(lp->lock);
50470ada6bSMartin Schwidefsky 		} while (owner && count-- > 0);
51470ada6bSMartin Schwidefsky 		if (!owner)
52470ada6bSMartin Schwidefsky 			continue;
53470ada6bSMartin Schwidefsky 		/*
54470ada6bSMartin Schwidefsky 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
55470ada6bSMartin Schwidefsky 		 * yield the CPU if the lock is still unavailable.
56470ada6bSMartin Schwidefsky 		 */
57470ada6bSMartin Schwidefsky 		if (!MACHINE_IS_LPAR)
58470ada6bSMartin Schwidefsky 			smp_yield_cpu(~owner);
59951f22d5SMartin Schwidefsky 	}
60951f22d5SMartin Schwidefsky }
610199c4e6SThomas Gleixner EXPORT_SYMBOL(arch_spin_lock_wait);
62951f22d5SMartin Schwidefsky 
/*
 * arch_spin_lock_wait_flags - spinlock acquire slow path for the
 * _irqsave variants.
 * @lp:	   lock the inline fast path failed to grab.
 * @flags: interrupt state saved by the caller.
 *
 * Same algorithm as arch_spin_lock_wait(), but interrupts are
 * re-enabled (via local_irq_restore(flags)) while spinning and only
 * disabled across the compare-and-swap attempt, so the caller returns
 * with interrupts disabled exactly when the lock is held.
 */
630199c4e6SThomas Gleixner void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
64894cdde2SHisashi Hifumi {
656c8cd5bbSPhilipp Hachtmann 	unsigned int cpu = SPINLOCK_LOCKVAL;
6659b69787SGerald Schaefer 	unsigned int owner;
672e4006b3SGerald Schaefer 	int count;
68894cdde2SHisashi Hifumi 
	/* Don't spin with interrupts disabled. */
69894cdde2SHisashi Hifumi 	local_irq_restore(flags);
70894cdde2SHisashi Hifumi 	while (1) {
71470ada6bSMartin Schwidefsky 		owner = ACCESS_ONCE(lp->lock);
72470ada6bSMartin Schwidefsky 		/* Try to get the lock if it is free. */
73470ada6bSMartin Schwidefsky 		if (!owner) {
74894cdde2SHisashi Hifumi 			local_irq_disable();
755b3f683eSPhilipp Hachtmann 			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
7659b69787SGerald Schaefer 				return;
7759b69787SGerald Schaefer 			local_irq_restore(flags);
			/*
			 * NOTE(review): unlike arch_spin_lock_wait() there is
			 * no "continue" here, so on a lost cmpxchg race we
			 * fall through with owner == 0 and pass ~0 (an
			 * invalid cpu address) to smp_vcpu_scheduled() /
			 * smp_yield_cpu() below - confirm those helpers
			 * tolerate it, or add a "continue" as in the
			 * non-flags variant.
			 */
78470ada6bSMartin Schwidefsky 		}
79470ada6bSMartin Schwidefsky 		/* Check if the lock owner is running. */
80470ada6bSMartin Schwidefsky 		if (!smp_vcpu_scheduled(~owner)) {
			/* Owner vcpu is preempted: directed yield to it. */
81470ada6bSMartin Schwidefsky 			smp_yield_cpu(~owner);
8259b69787SGerald Schaefer 			continue;
8359b69787SGerald Schaefer 		}
84470ada6bSMartin Schwidefsky 		/* Loop for a while on the lock value. */
85470ada6bSMartin Schwidefsky 		count = spin_retry;
86470ada6bSMartin Schwidefsky 		do {
87470ada6bSMartin Schwidefsky 			owner = ACCESS_ONCE(lp->lock);
88470ada6bSMartin Schwidefsky 		} while (owner && count-- > 0);
89470ada6bSMartin Schwidefsky 		if (!owner)
90470ada6bSMartin Schwidefsky 			continue;
91470ada6bSMartin Schwidefsky 		/*
92470ada6bSMartin Schwidefsky 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
93470ada6bSMartin Schwidefsky 		 * yield the CPU if the lock is still unavailable.
94470ada6bSMartin Schwidefsky 		 */
95470ada6bSMartin Schwidefsky 		if (!MACHINE_IS_LPAR)
968b646bd7SMartin Schwidefsky 			smp_yield_cpu(~owner);
97894cdde2SHisashi Hifumi 	}
98894cdde2SHisashi Hifumi }
990199c4e6SThomas Gleixner EXPORT_SYMBOL(arch_spin_lock_wait_flags);
100894cdde2SHisashi Hifumi 
/*
 * arch_spin_relax - hint while busy-waiting on a contended lock.
 * @lp: lock being waited on.
 *
 * If the lock is held, yield to the owner's cpu (~lock word) when
 * running under z/VM or KVM, or - on any machine - when the owner's
 * vcpu is not currently scheduled by the hypervisor.
 */
1015b3f683eSPhilipp Hachtmann void arch_spin_relax(arch_spinlock_t *lp)
102951f22d5SMartin Schwidefsky {
1035b3f683eSPhilipp Hachtmann 	unsigned int cpu = lp->lock;
	/* cpu == 0 means the lock is free: nothing to yield to. */
10459b69787SGerald Schaefer 	if (cpu != 0) {
10559b69787SGerald Schaefer 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
10659b69787SGerald Schaefer 		    !smp_vcpu_scheduled(~cpu))
1078b646bd7SMartin Schwidefsky 			smp_yield_cpu(~cpu);
1083c1fcfe2SMartin Schwidefsky 	}
10959b69787SGerald Schaefer }
1100199c4e6SThomas Gleixner EXPORT_SYMBOL(arch_spin_relax);
1113c1fcfe2SMartin Schwidefsky 
/*
 * arch_spin_trylock_retry - bounded trylock loop.
 * @lp: lock to attempt.
 *
 * Retries the inline trylock fast path up to spin_retry times.
 * Returns 1 if the lock was acquired, 0 otherwise.  Does not yield.
 */
1125b3f683eSPhilipp Hachtmann int arch_spin_trylock_retry(arch_spinlock_t *lp)
1135b3f683eSPhilipp Hachtmann {
1145b3f683eSPhilipp Hachtmann 	int count;
1155b3f683eSPhilipp Hachtmann 
116bae8f567SMartin Schwidefsky 	for (count = spin_retry; count > 0; count--)
1175b3f683eSPhilipp Hachtmann 		if (arch_spin_trylock_once(lp))
1185b3f683eSPhilipp Hachtmann 			return 1;
1195b3f683eSPhilipp Hachtmann 	return 0;
1205b3f683eSPhilipp Hachtmann }
1215b3f683eSPhilipp Hachtmann EXPORT_SYMBOL(arch_spin_trylock_retry);
1225b3f683eSPhilipp Hachtmann 
/*
 * _raw_read_lock_wait - rwlock read-acquire slow path.
 * @rw: rwlock to take for reading.
 *
 * rw->lock layout (as used throughout this file): bit 31 set
 * (0x80000000, i.e. (int)lock < 0) means write-locked; otherwise the
 * low bits count the current readers.  Spin until no writer holds the
 * lock, then bump the reader count with a compare-and-swap.
 */
123fb3a6bbcSThomas Gleixner void _raw_read_lock_wait(arch_rwlock_t *rw)
124951f22d5SMartin Schwidefsky {
125951f22d5SMartin Schwidefsky 	unsigned int old;
126951f22d5SMartin Schwidefsky 	int count = spin_retry;
127951f22d5SMartin Schwidefsky 
128951f22d5SMartin Schwidefsky 	while (1) {
		/* After spin_retry iterations give the cpu away once. */
129951f22d5SMartin Schwidefsky 		if (count-- <= 0) {
1308b646bd7SMartin Schwidefsky 			smp_yield();
131951f22d5SMartin Schwidefsky 			count = spin_retry;
132951f22d5SMartin Schwidefsky 		}
133bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Negative lock word: a writer holds the lock. */
134bae8f567SMartin Schwidefsky 		if ((int) old < 0)
13596567161SChristian Ehrhardt 			continue;
1355b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
136951f22d5SMartin Schwidefsky 			return;
137951f22d5SMartin Schwidefsky 	}
138951f22d5SMartin Schwidefsky }
139951f22d5SMartin Schwidefsky EXPORT_SYMBOL(_raw_read_lock_wait);
141951f22d5SMartin Schwidefsky 
/*
 * _raw_read_lock_wait_flags - read-acquire slow path for the _irqsave
 * variants.
 * @rw:	   rwlock to take for reading.
 * @flags: interrupt state saved by the caller.
 *
 * Same as _raw_read_lock_wait(), but spins with interrupts re-enabled
 * and disables them only across the compare-and-swap, so the function
 * returns with interrupts disabled exactly when the lock is held.
 */
142fb3a6bbcSThomas Gleixner void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
143ce58ae6fSHeiko Carstens {
144ce58ae6fSHeiko Carstens 	unsigned int old;
145ce58ae6fSHeiko Carstens 	int count = spin_retry;
146ce58ae6fSHeiko Carstens 
	/* Don't spin with interrupts disabled. */
147ce58ae6fSHeiko Carstens 	local_irq_restore(flags);
148ce58ae6fSHeiko Carstens 	while (1) {
149ce58ae6fSHeiko Carstens 		if (count-- <= 0) {
1508b646bd7SMartin Schwidefsky 			smp_yield();
151ce58ae6fSHeiko Carstens 			count = spin_retry;
152ce58ae6fSHeiko Carstens 		}
153bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Negative lock word: a writer holds the lock. */
154bae8f567SMartin Schwidefsky 		if ((int) old < 0)
155ce58ae6fSHeiko Carstens 			continue;
156ce58ae6fSHeiko Carstens 		local_irq_disable();
1575b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
158ce58ae6fSHeiko Carstens 			return;
		/* cmpxchg lost the race: re-enable irqs and retry. */
159939c5ae4SMartin Schwidefsky 		local_irq_restore(flags);
160ce58ae6fSHeiko Carstens 	}
161ce58ae6fSHeiko Carstens }
162ce58ae6fSHeiko Carstens EXPORT_SYMBOL(_raw_read_lock_wait_flags);
163ce58ae6fSHeiko Carstens 
/*
 * _raw_read_trylock_retry - bounded read-trylock loop.
 * @rw: rwlock to attempt for reading.
 *
 * Tries up to spin_retry times to increment the reader count while no
 * writer (bit 31) holds the lock.  Returns 1 on success, 0 otherwise.
 */
164fb3a6bbcSThomas Gleixner int _raw_read_trylock_retry(arch_rwlock_t *rw)
165951f22d5SMartin Schwidefsky {
166951f22d5SMartin Schwidefsky 	unsigned int old;
167951f22d5SMartin Schwidefsky 	int count = spin_retry;
168951f22d5SMartin Schwidefsky 
169951f22d5SMartin Schwidefsky 	while (count-- > 0) {
170bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Negative lock word: a writer holds the lock. */
171bae8f567SMartin Schwidefsky 		if ((int) old < 0)
17296567161SChristian Ehrhardt 			continue;
1735b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
174951f22d5SMartin Schwidefsky 			return 1;
175951f22d5SMartin Schwidefsky 	}
176951f22d5SMartin Schwidefsky 	return 0;
177951f22d5SMartin Schwidefsky }
178951f22d5SMartin Schwidefsky EXPORT_SYMBOL(_raw_read_trylock_retry);
179951f22d5SMartin Schwidefsky 
/*
 * _raw_write_lock_wait - rwlock write-acquire slow path.
 * @rw: rwlock to take for writing.
 *
 * A writer may only take the lock when the lock word is 0 (no writer,
 * no readers); it then swaps in 0x80000000, the write-locked bit.
 */
180fb3a6bbcSThomas Gleixner void _raw_write_lock_wait(arch_rwlock_t *rw)
181951f22d5SMartin Schwidefsky {
182bae8f567SMartin Schwidefsky 	unsigned int old;
183951f22d5SMartin Schwidefsky 	int count = spin_retry;
184951f22d5SMartin Schwidefsky 
185951f22d5SMartin Schwidefsky 	while (1) {
		/* After spin_retry iterations give the cpu away once. */
186951f22d5SMartin Schwidefsky 		if (count-- <= 0) {
1878b646bd7SMartin Schwidefsky 			smp_yield();
188951f22d5SMartin Schwidefsky 			count = spin_retry;
189951f22d5SMartin Schwidefsky 		}
190bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Non-zero: readers or another writer still hold it. */
191bae8f567SMartin Schwidefsky 		if (old)
19296567161SChristian Ehrhardt 			continue;
1935b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
194951f22d5SMartin Schwidefsky 			return;
195951f22d5SMartin Schwidefsky 	}
196951f22d5SMartin Schwidefsky }
197951f22d5SMartin Schwidefsky EXPORT_SYMBOL(_raw_write_lock_wait);
198951f22d5SMartin Schwidefsky 
/*
 * _raw_write_lock_wait_flags - write-acquire slow path for the
 * _irqsave variants.
 * @rw:	   rwlock to take for writing.
 * @flags: interrupt state saved by the caller.
 *
 * Same as _raw_write_lock_wait(), but spins with interrupts re-enabled
 * and disables them only across the compare-and-swap, so the function
 * returns with interrupts disabled exactly when the lock is held.
 */
199fb3a6bbcSThomas Gleixner void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
200ce58ae6fSHeiko Carstens {
201bae8f567SMartin Schwidefsky 	unsigned int old;
202ce58ae6fSHeiko Carstens 	int count = spin_retry;
203ce58ae6fSHeiko Carstens 
	/* Don't spin with interrupts disabled. */
204ce58ae6fSHeiko Carstens 	local_irq_restore(flags);
205ce58ae6fSHeiko Carstens 	while (1) {
206ce58ae6fSHeiko Carstens 		if (count-- <= 0) {
2078b646bd7SMartin Schwidefsky 			smp_yield();
208ce58ae6fSHeiko Carstens 			count = spin_retry;
209ce58ae6fSHeiko Carstens 		}
210bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Non-zero: readers or another writer still hold it. */
211bae8f567SMartin Schwidefsky 		if (old)
212ce58ae6fSHeiko Carstens 			continue;
213ce58ae6fSHeiko Carstens 		local_irq_disable();
2145b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
215ce58ae6fSHeiko Carstens 			return;
		/* cmpxchg lost the race: re-enable irqs and retry. */
216939c5ae4SMartin Schwidefsky 		local_irq_restore(flags);
217ce58ae6fSHeiko Carstens 	}
218ce58ae6fSHeiko Carstens }
219ce58ae6fSHeiko Carstens EXPORT_SYMBOL(_raw_write_lock_wait_flags);
220ce58ae6fSHeiko Carstens 
/*
 * _raw_write_trylock_retry - bounded write-trylock loop.
 * @rw: rwlock to attempt for writing.
 *
 * Tries up to spin_retry times to swap 0 -> 0x80000000 (the
 * write-locked bit).  Returns 1 on success, 0 otherwise.
 */
221fb3a6bbcSThomas Gleixner int _raw_write_trylock_retry(arch_rwlock_t *rw)
222951f22d5SMartin Schwidefsky {
223bae8f567SMartin Schwidefsky 	unsigned int old;
224951f22d5SMartin Schwidefsky 	int count = spin_retry;
225951f22d5SMartin Schwidefsky 
226951f22d5SMartin Schwidefsky 	while (count-- > 0) {
227bae8f567SMartin Schwidefsky 		old = ACCESS_ONCE(rw->lock);
		/* Non-zero: readers or another writer still hold it. */
228bae8f567SMartin Schwidefsky 		if (old)
22996567161SChristian Ehrhardt 			continue;
2305b3f683eSPhilipp Hachtmann 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
231951f22d5SMartin Schwidefsky 			return 1;
232951f22d5SMartin Schwidefsky 	}
233951f22d5SMartin Schwidefsky 	return 0;
234951f22d5SMartin Schwidefsky }
235951f22d5SMartin Schwidefsky EXPORT_SYMBOL(_raw_write_trylock_retry);
236