#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}
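/*
 * For reference, a simplified sketch of what this file relies on from
 * <asm/spinlock_types.h> (guarded out - the real definitions live there).
 * head_tail aliases the two tickets so a single xadd or cmpxchg can
 * operate on both halves at once; TICKET_SHIFT is the bit width of one
 * __ticket_t, and under paravirt the tickets step by 2 so that bit 0 of
 * tail is free to serve as the slowpath flag.
 */
#if 0
typedef struct arch_spinlock {
	union {
		__ticketpair_t head_tail;	/* both tickets as one word */
		struct __raw_tickets {
			__ticket_t head, tail;	/* tail in the high part */
		} tickets;
	};
} arch_spinlock_t;
#endif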
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case.  Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
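/*
 * Illustrative user-space analogue of the fast path above - never built
 * here, and the demo_* names are invented for this sketch.  It mimics
 * the xadd acquire with C11 atomics; the release uses a cmpxchg loop on
 * the combined word (rather than the kernel's plain 16-bit add on the
 * head half) to stay within portable C11, and the paravirt slowpath is
 * omitted entirely.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

struct demo_ticketlock {
	_Atomic uint32_t head_tail;	/* low 16 bits: head, high 16: tail */
};

static void demo_lock(struct demo_ticketlock *lk)
{
	/*
	 * xadd on the pair: take a ticket and observe the current head.
	 * A tail wraparound carries harmlessly off the top of the word -
	 * this is exactly why the tail lives in the high part.
	 */
	uint32_t old = atomic_fetch_add_explicit(&lk->head_tail, 1u << 16,
						 memory_order_acquire);
	uint16_t me = (uint16_t)(old >> 16);	/* our ticket (old tail) */

	/* wait until the head catches up to our ticket */
	while ((uint16_t)atomic_load_explicit(&lk->head_tail,
					      memory_order_acquire) != me)
		;	/* spin; the kernel uses cpu_relax() here */
}

static void demo_unlock(struct demo_ticketlock *lk)
{
	uint32_t old = atomic_load_explicit(&lk->head_tail,
					    memory_order_relaxed);
	uint32_t new;

	/* bump only the head half, never letting a carry reach the tail */
	do {
		new = (old & 0xffff0000u) | ((old + 1) & 0xffffu);
	} while (!atomic_compare_exchange_weak_explicit(&lk->head_tail,
							&old, new,
							memory_order_release,
							memory_order_relaxed));
}
#endif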
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
		    new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}
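/*
 * Worked example for arch_spin_is_contended() above, assuming the
 * paravirt configuration where TICKET_LOCK_INC == 2 (tickets step by 2,
 * leaving bit 0 of tail for the slowpath flag).  A held lock with no
 * waiters has tail - head == TICKET_LOCK_INC, which does not count as
 * contended; any queued waiter pushes the difference above that.  The
 * demo_* names are invented for this guarded-out sketch.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_TICKET_LOCK_INC	2

/* mirror of arch_spin_is_contended() on plain ticket values */
static int demo_is_contended(uint16_t head, uint16_t tail)
{
	return (uint16_t)(tail - head) > DEMO_TICKET_LOCK_INC;
}

static void demo_contended_examples(void)
{
	assert(!demo_is_contended(4, 4));	/* unlocked               */
	assert(!demo_is_contended(4, 6));	/* held, no waiters       */
	assert( demo_is_contended(4, 8));	/* held, one waiter       */
	assert( demo_is_contended(0xfffe, 2));	/* wraparound still works */
}
#endif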
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86-specific optimizations.
 */

#include <asm/qrwlock.h>

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */
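/*
 * Guarded-out user-space sketch of the reader/writer semantics the
 * qrwlock provides: readers share the lock, a writer excludes everyone.
 * The real implementation in kernel/locking/qrwlock.c adds fair queueing
 * on top of this; the demo_* names here are invented, the scheme is
 * deliberately unfair, and none of it is ever built as part of the
 * kernel.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_WRITER	1u	/* bit 0: a writer holds the lock    */
#define DEMO_READER	2u	/* each active reader adds this bias */

static _Atomic uint32_t demo_rwlock;	/* 0 == unlocked */

static void demo_read_lock(void)
{
	uint32_t v = atomic_fetch_add_explicit(&demo_rwlock, DEMO_READER,
					       memory_order_acquire);
	while (v & DEMO_WRITER) {
		/* a writer is in: withdraw our claim, wait, try again */
		atomic_fetch_sub_explicit(&demo_rwlock, DEMO_READER,
					  memory_order_relaxed);
		while (atomic_load_explicit(&demo_rwlock,
					    memory_order_relaxed) & DEMO_WRITER)
			;	/* spin */
		v = atomic_fetch_add_explicit(&demo_rwlock, DEMO_READER,
					      memory_order_acquire);
	}
}

static void demo_read_unlock(void)
{
	atomic_fetch_sub_explicit(&demo_rwlock, DEMO_READER,
				  memory_order_release);
}

static void demo_write_lock(void)
{
	uint32_t expected = 0;

	/* a writer needs the whole word to be zero: no readers, no writer */
	while (!atomic_compare_exchange_weak_explicit(&demo_rwlock, &expected,
						      DEMO_WRITER,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
}

static void demo_write_unlock(void)
{
	atomic_fetch_sub_explicit(&demo_rwlock, DEMO_WRITER,
				  memory_order_release);
}
#endif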