#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
}

static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * If a spinlock has someone waiting on it, then kick the appropriate
 * waiting cpu.
 */
static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t next)
{
	if (unlikely(lock->tickets.tail != next))
		____ticket_unlock_kick(lock, next);
}
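
/*
 * A minimal sketch of what a paravirtualized guest plugs into the two
 * stubs above when CONFIG_PARAVIRT_SPINLOCKS *is* enabled (the hooks are
 * then routed through pv_lock_ops instead of the empty stubs).  The
 * hv_wait_for_kick()/hv_kick_cpu() hypercalls below are hypothetical,
 * shown for illustration only:
 *
 *	static void example_lock_spinning(struct arch_spinlock *lock,
 *					  __ticket_t ticket)
 *	{
 *		hv_wait_for_kick(lock, ticket);	// block until kicked
 *	}
 *
 *	static void example_unlock_kick(struct arch_spinlock *lock,
 *					__ticket_t next)
 *	{
 *		hv_kick_cpu(lock, next);	// wake the waiter of ticket 'next'
 *	}
 */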
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case.  Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (inc.head == inc.tail)
				goto out;
			cpu_relax();
			inc.head = ACCESS_ONCE(lock->tickets.head);
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_t next = lock->tickets.head + 1;

	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
	__ticket_unlock_kick(lock, next);
}
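
/*
 * Worked example of the xadd scheme above (illustrative; assumes the
 * 8-bit __ticket_t layout used when NR_CPUS < 256):
 *
 *	initial state:	{ head = 0, tail = 0 }	(unlocked)
 *	CPU A locks:	xadd returns { 0, 0 }, tail becomes 1;
 *			inc.head == inc.tail, so A owns the lock.
 *	CPU B locks:	xadd returns { 0, 1 }, tail becomes 2;
 *			B spins until head == 1.
 *	CPU A unlocks:	head becomes 1; B's reread of lock->tickets.head
 *			sees 1 and B takes over the lock.
 */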
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
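
/*
 * Worked example of the biased counter (illustrative; assumes the 32-bit
 * layout, where RW_LOCK_BIAS is 0x00100000):
 *
 *	unlocked:	count == 0x00100000
 *	two readers:	count == 0x000ffffe	(still positive)
 *	one writer:	count == 0		(the whole bias subtracted)
 *
 * A reader arriving while the write lock is held decrements the count
 * below zero; the sign bit is then set, so the "jns" is not taken and the
 * reader waits in __read_lock_failed until the count turns non-negative.
 */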
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */
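
/*
 * Illustrative usage (not part of this header): kernel code never calls
 * these arch_* primitives directly, it uses the generic wrappers from
 * <linux/spinlock.h>.  "my_lock" and "my_rwlock" are hypothetical names
 * for the sake of the example:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	spin_lock(&my_lock);
 *	... critical section, single owner ...
 *	spin_unlock(&my_lock);
 *
 *	read_lock(&my_rwlock);
 *	... many concurrent readers allowed ...
 *	read_unlock(&my_rwlock);
 */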