/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

/*
 * Value stored in the lock word while a spinlock is held: the per-cpu
 * lockval kept in this CPU's lowcore.  arch_spin_lockval() below suggests
 * it is the bitwise complement of the CPU number.
 * NOTE(review): lowcore initialization is outside this file -- confirm
 * spinlock_lockval is set to ~cpu there.
 */
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

/*
 * NOTE(review): presumably the retry bound used by the out-of-line
 * *_retry()/*_wait() slow paths declared below -- confirm at definition.
 */
extern int spin_retry;

/*
 * Atomically compare *lock with 'old' and, if equal, replace it with 'new'
 * using the COMPARE AND SWAP (cs) instruction.
 *
 * Returns 1 if the swap happened (*lock contained 'old'), 0 otherwise.
 * The "memory" clobber makes this a full compiler barrier as well.
 */
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	/* on failure, cs loads the current lock value back into %0 (old) */
	return old == old_expected;
}

/*
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/* Out-of-line slow paths for the contended/retry cases (defined elsewhere). */
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

/* Lock value identifying @cpu as the holder; ~cpu is never 0 ("unlocked"). */
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

/* Lock passed by value: a zero lock word means nobody holds it. */
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	/* ACCESS_ONCE forces a real, single load of the lock word */
	return ACCESS_ONCE(lp->lock) != 0;
}

/*
 * One inline compare-and-swap attempt to take the lock (0 -> own lockval).
 * Returns 1 on success, 0 if the lock was already held.
 */
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
}

/*
 * One inline compare-and-swap attempt to release the lock
 * (own lockval -> 0).  Returns 0 if the lock word did not hold this
 * CPU's lockval.
 */
static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	/* uncontended fast path stays inline; contention goes out of line */
	if (unlikely(!arch_spin_trylock_once(lp)))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	/*
	 * Like arch_spin_lock(), but the slow path also receives the saved
	 * interrupt 'flags' (the IRQ-clearing variant of the lock).
	 */
	if (unlikely(!arch_spin_trylock_once(lp)))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	/* one inline attempt, then the out-of-line retry path */
	if (unlikely(!arch_spin_trylock_once(lp)))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	/*
	 * Result deliberately ignored: the cs can only fail if the lock
	 * word does not contain this CPU's lockval, i.e. the caller did
	 * not hold the lock.
	 */
	arch_spin_tryrelease_once(lp);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	/* spin until the lock is observed free; does not acquire it */
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * Lock word encoding (from the constants below): bit 31 (0x80000000) set
 * means write-locked, the low 31 bits count the readers.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * True while the writer bit (bit 31) is clear.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * True only when there is no writer and no readers at all.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

/* Out-of-line rwlock slow paths (defined elsewhere). */
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	/*
	 * Speculative snapshot with the writer bit masked off, then try to
	 * bump the reader count; the cs fails (and we take the slow path)
	 * if a writer appeared or the snapshot was stale.
	 */
	old = rw->lock & 0x7fffffffU;
	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	/* same as arch_read_lock(), but the slow path receives 'flags' */
	old = rw->lock & 0x7fffffffU;
	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	/* cs retry loop: re-read the word until the decrement lands */
	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* writer needs the whole lock: 0 (no readers/writer) -> bit 31 */
	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	/* same as arch_write_lock(), but the slow path receives 'flags' */
	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* drop the writer bit; result ignored like arch_spin_unlock() */
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
		return 1;
	/* lost the race on the first attempt: retry out of line */
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		return 1;
	return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */