xref: /openbmc/linux/arch/s390/lib/spinlock.c (revision b2441318)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

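/*
 * Number of times a contended lock path spins before considering a
 * yield to the hypervisor. -1 means "not configured"; spin_retry_init()
 * below turns that into a default of 1000 during early boot, and the
 * "spin_retry=" kernel parameter can override it.
 */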
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * Parse the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

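/*
 * Load the lock word, preceded on zEC12 and newer by NIAI 4 ("next
 * instruction access intent"). Intent code 4 hints that the following
 * load is a plain read, so the cache line can be fetched shared and the
 * spinning CPUs do not keep stealing it exclusively from the lock
 * holder. The opcode is emitted as a .long because older assemblers do
 * not know the NIAI mnemonic.
 */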
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}

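/*
 * Compare-and-swap on the lock word, preceded on zEC12 and newer by
 * NIAI 8, which hints a store access so the line is fetched exclusively
 * for the following CS instruction. Returns nonzero if the lock word
 * still held @old and was replaced by @new.
 */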
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}

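/*
 * Slow path of arch_spin_lock(). The lock word holds SPINLOCK_LOCKVAL
 * of the owner, which is set up as the bitwise complement of the CPU
 * number (see arch_spin_lockval() in asm/spinlock.h), so ~owner below
 * recovers the CPU id for arch_vcpu_is_preempted() and smp_yield_cpu().
 */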
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

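/*
 * Variant of arch_spin_lock_wait() for spin_lock_irqsave(): interrupts
 * are re-enabled (restored from @flags) while spinning and only
 * disabled again around the actual compare-and-swap attempt, so a long
 * wait does not delay interrupt handling.
 */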
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	local_irq_restore(flags);

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

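/*
 * Retry loop of arch_spin_trylock(): up to spin_retry lock attempts
 * without yielding. Returns 1 if the lock was taken, 0 if it stayed
 * contended for the whole retry budget.
 */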
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

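/*
 * rwlock slow paths. The lock word counts readers in the low 31 bits;
 * the sign bit (0x80000000) marks a writer, so old < 0 means a writer
 * holds or is taking the lock. With z196 facilities the reader fast
 * path has already added 1 with an interlocked add, so the wait path
 * first backs that increment out before it starts polling.
 */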
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

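/*
 * Bounded trylock for readers: bump the reader count unless the writer
 * bit is set, giving up after spin_retry attempts.
 */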
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

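/*
 * Writer slow path, z196+ variant. The arch_write_lock() fast path has
 * already set the writer bit with an interlocked OR and passes the
 * previous lock value in @prev. Loop until all readers have drained
 * (low 31 bits zero) and the writer bit was observed to be ours, i.e.
 * the value seen before the OR had no writer bit set (prev >= 0).
 */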
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

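/*
 * Writer slow path for machines without the z196 interlocked-access
 * instructions: the writer bit is set with compare-and-swap instead,
 * then the loop waits for the reader count to drain.
 */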
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

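/*
 * Bounded trylock for writers: succeeds only while the lock word is
 * completely free, i.e. no readers and no writer.
 */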
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

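/*
 * Polling hint for contended locks: @cpu is the owner value taken from
 * a lock word (~CPU number), or 0 if the owner is unknown. Yield to the
 * hypervisor unless there is no known owner, or we run in an LPAR and
 * the owning CPU is not preempted.
 */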
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);