#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
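/*
 * Illustrative sketch only (never compiled): a minimal user-space model of
 * the ticket lock described above, written against C11 <stdatomic.h> instead
 * of the kernel's xadd()/cpu_relax() primitives.  The single fetch_add on
 * the combined word plays the role of the wide xadd: it claims a ticket from
 * the high half while observing the current head in the low half in one
 * atomic step.  All names below are made up for the example.
 */
#if 0	/* documentation-only example, not part of this header */
#include <stdatomic.h>
#include <stdint.h>

struct example_ticket_lock {
	_Atomic uint32_t tickets;	/* low 16 bits: head, high 16 bits: tail */
};

static inline void example_ticket_lock_acquire(struct example_ticket_lock *lock)
{
	/* Take a ticket: bump the tail (high half) and read the old value. */
	uint32_t old = atomic_fetch_add_explicit(&lock->tickets, 1u << 16,
						 memory_order_acquire);
	uint16_t my_ticket = (uint16_t)(old >> 16);

	/* Spin until the head (low half) reaches our ticket. */
	while ((uint16_t)atomic_load_explicit(&lock->tickets,
					      memory_order_acquire) != my_ticket)
		;	/* the real code uses cpu_relax() here */
}

static inline void example_ticket_lock_release(struct example_ticket_lock *lock)
{
	/*
	 * Advance the head.  Unlike the kernel's incb/incw, this wide add
	 * would let a head wraparound carry into the tail; good enough for
	 * illustration, not for a real lock.
	 */
	atomic_fetch_add_explicit(&lock->tickets, 1, memory_order_release);
}
#endif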
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();	/* make sure nothing creeps in before the lock is taken */
}

#if (NR_CPUS < 256)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned tmp;
	unsigned new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
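/*
 * Illustrative sketch only (never compiled): roughly the logic the
 * NR_CPUS < 256 __ticket_spin_trylock() asm above implements, written as
 * plain C.  The lock is free only while head == tail; in that case we try
 * to install a copy of the word with the tail bumped by one (the
 * "leal 0x100(...)"), and the locked cmpxchg either claims the ticket or
 * fails if another CPU raced with us.  This is a simplified model: it
 * operates on the whole slock word rather than the 16-bit cmpxchgw used
 * by the real code.
 */
#if 0	/* documentation-only example, not part of this header */
static inline int example_ticket_trylock(arch_spinlock_t *lock)
{
	unsigned int old = ACCESS_ONCE(lock->slock);

	/* Low byte is the head, next byte the tail ("cmpb %h0,%b0"). */
	if ((old & 0xff) != ((old >> 8) & 0xff))
		return 0;	/* lock is held or has waiters */

	/* Bump the tail and try to install it atomically. */
	return cmpxchg(&lock->slock, old, old + 0x100) == old;
}
#endif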
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */
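/*
 * Illustrative usage sketch only (never compiled, appended after the include
 * guard purely as documentation): the pattern described in the rwlock comment
 * above -- the writer takes an irq-safe write lock, while a reader running in
 * interrupt context can take the plain, non-irqsafe read lock.  The names
 * my_lock, my_irq_handler and my_update are made up for the example.
 */
#if 0	/* documentation-only example, not part of this header */
static DEFINE_RWLOCK(my_lock);

static irqreturn_t my_irq_handler(int irq, void *dev)
{
	/* Reader in interrupt context: no need to disable interrupts. */
	read_lock(&my_lock);
	/* ... look something up under the lock ... */
	read_unlock(&my_lock);
	return IRQ_HANDLED;
}

static void my_update(void)
{
	unsigned long flags;

	/*
	 * The writer must be irq-safe: if my_irq_handler() interrupted us
	 * while we held the lock for writing, it would spin forever.
	 */
	write_lock_irqsave(&my_lock, flags);
	/* ... modify the shared data ... */
	write_unlock_irqrestore(&my_lock, flags);
}
#endif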