xref: /openbmc/linux/arch/s390/include/asm/spinlock.h (revision fad442d3)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax	arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

/* The lock value encodes the owning CPU as cpu + 1, so 0 means unlocked. */
static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	kcsan_release();
	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
		"	sth	%1,%0\n"
		: "=R" (((unsigned short *) &lp->lock)[1])
		: "d" (0) : "cc", "memory");
}

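/*
 * Illustrative sketch, not part of this header: kernel code does not call
 * the arch_spin_* primitives directly but goes through the generic
 * <linux/spinlock.h> wrappers, which expand to them on s390. The lock name
 * below is hypothetical:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	spin_lock(&example_lock);	-> arch_spin_trylock_once()
 *	... critical section ...
 *	spin_unlock(&example_lock);	-> arch_spin_unlock()
 *
 * If the inline compare-and-swap in arch_spin_trylock_once() fails,
 * spin_lock() ends up in the out-of-line arch_spin_lock_wait() slow path.
 */
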
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

/* Readers count in the lower 16 bits of ->cnts; a writer holds 0x30000. */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) &&
		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

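/*
 * Illustrative sketch, not part of this header: the irq-safe/non-irqsafe
 * mixing described in the comment above, via the generic <linux/rwlock.h>
 * wrappers. The lock name is hypothetical:
 *
 *	static DEFINE_RWLOCK(example_rwlock);
 *
 *	Writer, process context only:
 *		unsigned long flags;
 *		write_lock_irqsave(&example_rwlock, flags);
 *		... update data ...
 *		write_unlock_irqrestore(&example_rwlock, flags);
 *
 *	Reader, may also run from an interrupt handler:
 *		read_lock(&example_rwlock);
 *		... read data ...
 *		read_unlock(&example_rwlock);
 */
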
#endif /* __ASM_SPINLOCK_H */