/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Per-cpu lock value stored into a held spinlock, read from the lowcore.
 * It is ~cpu (see arch_spin_lockval() below), so it is never 0 and a
 * stored value of 0 unambiguously means "unlocked".
 */
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

/*
 * Atomically replace *lock with new iff it still contains old.
 * Returns nonzero on success.  Implemented with the gcc builtin,
 * which is documented to act as a full memory barrier.
 */
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/* On !SMP a vcpu can never be preempted; otherwise ask the hypervisor. */
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/* Out-of-line slow paths, implemented in arch/s390/lib/spinlock.c. */
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

/* Yield to the cpu currently holding the lock (lock->lock is ~cpu). */
static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

/* Lock value for a given cpu: bitwise complement, guaranteed nonzero. */
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

/*
 * Single acquisition attempt: one compare-and-swap of 0 -> SPINLOCK_LOCKVAL.
 * The barrier() only stops the compiler from caching lp->lock across the
 * test; the CAS itself provides the hardware ordering.
 */
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

/* Fast path inline; fall back to the out-of-line wait loop on contention. */
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

/* As arch_spin_lock(), but the slow path may re-enable the saved irq state. */
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

/* Returns 1 if the lock was taken, 0 otherwise (after spin_retry attempts). */
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

/*
 * Release: a plain store of 0.  The "memory" clobber keeps the compiler
 * from moving critical-section accesses past the store; no explicit cpu
 * barrier instruction is emitted here.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * Encoding of rw->lock: bit 31 (0x80000000) is the writer bit, the low
 * bits count the readers, so "(int)lock >= 0" means no writer present.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* One CAS attempt to bump the reader count, only if no writer holds it. */
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

/* One CAS attempt to set the writer bit, only if the lock is fully free. */
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

/* Interlocked-access facility instructions (z196+): or / and / add. */
#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

/*
 * Atomic fetch-and-<op> used on the acquire side.  Returns the previous
 * value of *ptr.  The trailing "bcr 14,0" is a serializing no-op branch
 * (fast-BCR-serialization) acting as the acquire barrier.
 */
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

/* Same fetch-and-<op>, release side: no serializing branch needed. */
#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

/* Bump the reader count; if a writer was present, enter the wait loop. */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

/*
 * Set the writer bit; if the lock was not free, pass the previous value
 * to the wait loop so it knows what it is contending with.  The owner
 * field records this cpu's lockval for arch_write_relax().
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

/* Clear owner first, then atomically drop the writer bit (bit 31). */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

/* Drop one reader: retry the CAS until the decrement goes through. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

/* Release: clear owner, then store 0 (compare arch_spin_unlock above). */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

/* On success the owner field must be set, whichever path took the lock. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */