#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case.  Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
 */
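/*
 * Illustrative sketch only, not kernel code: the same head/tail scheme
 * written as a stand-alone user-space ticket lock on top of the GCC
 * __atomic builtins.  The names (demo_ticket_lock, demo_lock, demo_unlock)
 * are invented for the example, and the union layout assumes a
 * little-endian machine such as x86, where "head" is the low half of the
 * 32-bit word:
 *
 *	union demo_ticket_lock {
 *		unsigned int head_tail;
 *		struct {
 *			unsigned short head;
 *			unsigned short tail;
 *		} tickets;
 *	};
 *
 *	static void demo_lock(union demo_ticket_lock *lock)
 *	{
 *		unsigned int old;
 *		unsigned short me;
 *
 *		old = __atomic_fetch_add(&lock->head_tail, 1U << 16,
 *					 __ATOMIC_ACQUIRE);
 *		me = old >> 16;
 *		while (__atomic_load_n(&lock->tickets.head,
 *				       __ATOMIC_ACQUIRE) != me)
 *			;
 *	}
 *
 *	static void demo_unlock(union demo_ticket_lock *lock)
 *	{
 *		__atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
 *	}
 *
 * The single fetch-and-add of 1 << 16 bumps the tail and returns the old
 * head in the same operation, as the xadd below does, and the unlock
 * touches only the 16-bit head so that a wrap can never carry into the
 * tail.  A real implementation would also relax the CPU in the spin loop.
 */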
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * (An illustrative user-space sketch of this counter scheme appears at the
 * end of this file.)
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */
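/*
 * Illustrative sketch only, not kernel code: the biased-counter idea behind
 * the read-write lock above, written against the GCC __atomic builtins.
 * The names (DEMO_RW_BIAS, demo_rwlock, demo_read_trylock,
 * demo_write_trylock) are invented for the example; the real lock starts
 * at RW_LOCK_BIAS and uses the asm fast paths defined above.  The counter
 * must be initialized to DEMO_RW_BIAS before use:
 *
 *	#define DEMO_RW_BIAS 0x00100000
 *
 *	struct demo_rwlock {
 *		int count;
 *	};
 *
 *	static int demo_read_trylock(struct demo_rwlock *lock)
 *	{
 *		if (__atomic_sub_fetch(&lock->count, 1, __ATOMIC_ACQUIRE) >= 0)
 *			return 1;
 *		__atomic_add_fetch(&lock->count, 1, __ATOMIC_RELAXED);
 *		return 0;
 *	}
 *
 *	static int demo_write_trylock(struct demo_rwlock *lock)
 *	{
 *		if (__atomic_sub_fetch(&lock->count, DEMO_RW_BIAS,
 *				       __ATOMIC_ACQUIRE) == 0)
 *			return 1;
 *		__atomic_add_fetch(&lock->count, DEMO_RW_BIAS, __ATOMIC_RELAXED);
 *		return 0;
 *	}
 *
 * Each reader takes one unit off the bias, so the count stays non-negative
 * while only readers hold the lock; a writer subtracts the whole bias and
 * succeeds only if the result is exactly zero, i.e. no readers and no other
 * writer, which is the same test arch_write_trylock() makes with
 * atomic_sub_and_test() above.
 */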