/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * Parse the "spin_retry=" kernel command line parameter, which overrides
 * the default retry count set above.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

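/*
 * Per-CPU wait node for the queued (MCS-style) spinlock slow path.
 * A spinning CPU links its node into a doubly linked list of waiters;
 * node_id encodes the owning CPU and the node index in the tail format
 * of the lock word (see the _Q_TAIL_* definitions below).
 */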
struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

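/*
 * Four wait nodes per CPU allow the queued slow path to nest, e.g. when
 * an interrupt handler takes a spinlock while the interrupted context is
 * already queued; S390_lowcore.spinlock_index selects the node for the
 * current nesting level.
 */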
static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

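/*
 * Layout of the 32-bit lock word:
 *   bits  0-15: CPU number of the lock owner, plus one (0 == unlocked)
 *   bits 16-17: lock steal counter
 *   bits 18-19: index of the wait node of the tail CPU
 *   bits 20-31: CPU number of the tail of the wait queue, plus one
 */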
#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

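/*
 * Initialize the wait nodes of a CPU; presumably called from the CPU
 * setup code. Each node_id pre-encodes the owning CPU (plus one) and the
 * node index in the tail format used in the lock word.
 */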
void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

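/*
 * The NIAI instruction ("next instruction access intent", emitted as a
 * .long since older assemblers may not know the mnemonic) passes a cache
 * access hint for the following instruction: access intent 4 hints at a
 * read-only access, access intent 8 at an exclusive (store) access. This
 * reduces cache line ping-pong between CPUs spinning on the same lock.
 */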
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}

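/*
 * Translate the tail field of a lock word back into a pointer to the
 * per-CPU wait node it designates (the inverse of the node_id encoding
 * set up in arch_spin_lock_setup()).
 */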
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

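/*
 * Pick the CPU worth yielding to: the current lock owner if the lock is
 * held, otherwise the CPU at the head of the wait queue (found by walking
 * the prev pointers). Returns the CPU number plus one, or 0 if there is
 * no target.
 */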
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

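/*
 * Queued (MCS-style) slow path: enqueue this CPU's wait node at the tail
 * of the lock's wait queue, spin locally on node->prev until the node is
 * at the head of the queue, then spin on the lock word itself. Once the
 * lock is taken, hand the head role on to the next queued waiter. A free
 * lock with waiters may be stolen up to three times before the waiter at
 * the head of the queue is guaranteed to get it.
 */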
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = S390_lowcore.spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	while (1) {
		old = READ_ONCE(lp->lock);
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	S390_lowcore.spinlock_index--;
}

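/*
 * Classic slow path: compete for the lock directly with compare-and-swap,
 * using NIAI hints to limit cache line contention and yielding to a
 * preempted lock holder once the retry budget is exhausted.
 */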
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_cmpxchg_niai8(&lp->lock, old, new))
				/* Got the lock */
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	/*
	 * Queued spinlocks are used only on dedicated CPUs; on shared CPUs,
	 * where the steal time can reach 10% or more, classic spinlocks
	 * with niai hints perform better.
	 */
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

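/*
 * Trylock slow path: retry a plain compare-and-swap up to spin_retry
 * times without queueing; gives up and returns 0 if the lock stays owned.
 */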
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

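/*
 * rwlock slow paths. The rwlock word counts readers in the lower 31 bits;
 * the most significant bit (0x80000000) marks a write lock, so a negative
 * lock value means a writer holds or is acquiring the lock.
 */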
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

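/*
 * Write-lock slow path for machines with the z196 interlocked-access
 * facility. 'prev' is the lock value seen when the write bit was last
 * set; the loop re-sets the write bit with an atomic OR until it was
 * applied while no other writer held the lock and all readers have
 * drained.
 */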
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

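/*
 * Write-lock slow path without the interlocked-access facility: set the
 * write bit with compare-and-swap once no other writer holds the lock,
 * then wait until the remaining readers are gone while keeping the bit
 * set so that no new readers can enter.
 */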
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

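/*
 * Trylock slow path for write locks: only an entirely free lock (no
 * readers, no writer) can be taken, again with up to spin_retry attempts.
 */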
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

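/*
 * Relax helper, presumably used by the rwlock relax macros: yield to the
 * given CPU (encoded as cpu + 1), but only if there is a target and, on
 * LPAR, only if that CPU is actually preempted.
 */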
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_lock_relax);

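/*
 * Same policy for spinlocks: extract the owner from the lock word and
 * yield to it if it exists and may have lost its physical CPU.
 */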
void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);