/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
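
/*
 * Lock word encoding (summary added here, not in the original source):
 * 0 means unlocked; a held lock contains the owner's SPINLOCK_LOCKVAL,
 * which arch_spin_lockval() derives as the bitwise NOT of the CPU
 * number so that even CPU 0 yields a nonzero value.  For example,
 * CPU 0 stores 0xffffffff and CPU 1 stores 0xfffffffe.
 */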

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		/* NIAI 7: next-instruction-access-intent hint for the
		 * unlock store, emitted as .long so that older
		 * assemblers without the mnemonic still build it. */
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"	/* store of zero releases the lock */
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}
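
/*
 * Usage sketch (illustrative only, not part of the original header):
 * these arch_ functions are normally reached through the generic
 * spin_lock()/spin_unlock() wrappers, but called directly they
 * behave like this:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);		inline compare-and-swap fast path,
 *					out-of-line wait loop on contention
 *	...critical section...
 *	arch_spin_unlock(&lock);	a single store of zero
 */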

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);

	/* old >= 0: no writer holds the lock, try to add one reader */
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);

	/* old == 0: no readers and no writer, try to set bit 31 */
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

/* z196 and newer: use the interlocked-access facility instructions */
#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n" /* fast-BCR serialization */ \
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}
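
/*
 * Worked example of the rwlock word (a sketch added here, not from the
 * original source): bit 31 is the writer bit, the lower bits count
 * readers.
 *
 *	0x00000000	unlocked
 *	0x00000002	two readers; LAA added 1 twice, old >= 0 both times
 *	0x80000000	write-locked; LAO set bit 31 while old was 0
 *
 * A reader whose LAA returns old < 0 (writer bit set) enters
 * _raw_read_lock_wait(); a writer whose LAO returns old != 0 enters
 * _raw_write_lock_wait().
 */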

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	/* clear the writer bit (bit 31) */
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
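
/*
 * The trylock wrappers below try the inline fast path once and then
 * fall back to the out-of-line retry variants, which (an assumption
 * about the lib/spinlock.c side, not visible in this header) keep
 * retrying up to the tunable spin_retry count declared above before
 * giving up.
 */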

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

/*
 * rw->owner caches the lock value of the current writer; the relax
 * functions pass it to arch_lock_relax() so that a spinning CPU can
 * direct a yield to the lock holder (relevant when running
 * virtualized) instead of burning cycles.
 */
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */