// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

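/*
 * Layout of the 32-bit lock word (see the _Q_* masks above):
 *
 *	bits  0-15 : number of the CPU owning the lock, plus one (0 == free)
 *	bits 16-17 : lock-steal counter, advanced by _Q_LOCK_STEAL_ADD
 *	bits 18-19 : index of the tail waiter's per-CPU spin_wait node (0-3)
 *	bits 20-31 : number of the CPU at the tail of the wait queue, plus one
 *
 * A node_id encodes one of the four per-CPU wait nodes in the two tail
 * fields, e.g. node 1 of CPU 2 yields ((2 + 1) << 20) | (1 << 18) =
 * 0x00340000.
 */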
void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

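/*
 * The .long constants in the two helpers below encode the NIAI
 * (next-instruction access intent) instruction, patched in via
 * ALTERNATIVE() only if facility bit 49 is installed. NIAI 4 hints
 * that the operand of the following instruction is merely fetched,
 * NIAI 8 that it is stored, so the cache line holding the lock word
 * can be requested in the appropriate state. Without the facility the
 * alternative stays empty and a plain load or compare-and-swap runs.
 */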
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
		ALTERNATIVE("", ".long 0xb2fa0040", 49)	/* NIAI 4 */
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
		ALTERNATIVE("", ".long 0xb2fa0080", 49)	/* NIAI 8 */
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}

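/*
 * Reverse of the node_id encoding: e.g. a tail value of 0x00340000
 * gives ix = (0x00340000 & _Q_TAIL_IDX_MASK) >> 18 = 1 and
 * cpu = (0x00340000 & _Q_TAIL_CPU_MASK) >> 20 = 3, i.e. wait node 1
 * of CPU 2.
 */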
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

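/*
 * Return the one-based number of the CPU worth yielding to: the
 * current lock owner if the lock is held, otherwise the CPU of the
 * waiter at the head of the wait queue, or 0 if no target is known.
 */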
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

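/*
 * MCS-style queued slow path: every waiter enqueues one of its four
 * per-CPU spin_wait nodes, spins on its own node->prev pointer until
 * it has reached the head of the queue, and only then spins on the
 * lock word itself. A limited amount of lock stealing is tolerated;
 * after three steals (tracked in the steal counter) the queue head is
 * guaranteed to get the lock next.
 */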
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = S390_lowcore.spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	while (1) {
		old = READ_ONCE(lp->lock);
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	S390_lowcore.spinlock_index--;
}

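/*
 * Classic slow path: spin directly on the lock word with NIAI hints,
 * yielding to the lock holder whenever it is not running. The tail
 * bits are carried over unchanged, so a wait queue built up by other
 * CPUs is preserved.
 */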
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_cmpxchg_niai8(&lp->lock, old, new))
				/* Got the lock */
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	/*
	 * Use queued spinlocks on dedicated CPUs; use classic
	 * spinlocks + niai otherwise, where steal time may be >= 10%.
	 */
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

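/*
 * Opportunistic variant: retry a bounded number of times to grab the
 * lock if it is free, without ever enqueueing on the wait queue.
 */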
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

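/*
 * Layout of the rwlock ->cnts word as used by the wait functions
 * below:
 *
 *	bits  0-15 : number of readers holding the lock
 *	bit     16 : a writer holds the lock (0x10000)
 *	bits 17-31 : number of waiting writers (in units of 0x20000)
 *
 * ->wait is an arch spinlock used to queue both readers and writers.
 */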
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

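/*
 * Give the lock owner a chance to run: yield to it unless the machine
 * runs directly in an LPAR and the owning CPU is not preempted.
 */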
void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);