#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case.  Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant.  There really isn't
 * much between them in performance though, especially as locks are out of
 * line.
 */
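
/*
 * For illustration only: the same ticket protocol written as pseudo-C
 * against a hypothetical split head/tail view of the lock, with
 * fetch_and_add() standing in for the locked xadd:
 *
 *	lock:	my_ticket = fetch_and_add(&lock->tail, 1);
 *		while (ACCESS_ONCE(lock->head) != my_ticket)
 *			cpu_relax();
 *
 *	unlock:	lock->head++;
 *
 * The implementations below pack head and tail into a single word so that
 * one locked xadd both claims a ticket (the tail increment) and returns
 * the current head for the first comparison.
 */
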
#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	unsigned short inc = 1 << TICKET_SHIFT;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	unsigned inc = 1 << TICKET_SHIFT;
	unsigned tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned tmp;
	unsigned new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
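
/*
 * For illustration only: both trylock variants above implement the same
 * idea, roughly (with hypothetical head()/tail() accessors on a snapshot
 * of the lock word):
 *
 *	old = lock->slock;
 *	if (head(old) != tail(old))
 *		return 0;
 *	new = old + (1 << TICKET_SHIFT);
 *	return cmpxchg(&lock->slock, old, new) == old;
 *
 * i.e. the lock is taken only if it was completely free when sampled and
 * no other CPU raced in between the load and the cmpxchg.
 */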

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
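
/*
 * For illustration only: the reader count starts at a positive bias when
 * the lock is free and each reader takes one unit, so a negative (sign
 * bit set) value means a writer is involved.  Roughly:
 *
 *	read_lock:	if (--count >= 0)  acquired
 *			else               undo and take the slow path
 *	write_lock:	if ((write -= WRITE_LOCK_CMP) == 0)  acquired
 *			else               undo and take the slow path
 *
 * The exact operand widths and constants come from the READ_LOCK_*() and
 * WRITE_LOCK_* macros (#undef'd at the end of this file); the trylock
 * routines below spell the same logic out with the generic atomic helpers.
 */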

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
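
/*
 * Note: atomic_sub_and_test() returns true only when the subtraction hits
 * exactly zero, so arch_write_trylock() succeeds only if the write counter
 * held exactly WRITE_LOCK_CMP, i.e. the lock was completely idle.  On
 * failure the counter is immediately put back (atomic_add() in the write
 * case, READ_LOCK_ATOMIC(inc) in the read case), so a failed trylock
 * leaves no trace.
 */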

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */