#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
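/*
 * Example (illustrative sketch, assuming !CONFIG_PARAVIRT_SPINLOCKS, so
 * TICKET_LOCK_INC == 1 and TICKET_SLOWPATH_FLAG == 0):
 *
 *	lock starts as { head = 0, tail = 0 }
 *	CPU A: xadd returns { head = 0, tail = 0 }, lock becomes { 0, 1 };
 *	       returned head == tail, so A owns the lock immediately
 *	CPU B: xadd returns { head = 0, tail = 1 }, lock becomes { 0, 2 };
 *	       head != tail, so B spins until lock->tickets.head == 1
 *	CPU A: unlock increments head to 1, which hands the lock to B
 */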
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
		    new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
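/*
 * Sketch of the counter scheme (illustrative; the READ_LOCK_*, WRITE_LOCK_*
 * and RW_LOCK_BIAS definitions come from <asm/rwlock.h>):
 *
 *	the counter starts out at RW_LOCK_BIAS (no readers, no writer)
 *	arch_read_lock:  decrements by one and proceeds if the result stays
 *	                 non-negative ("jns"), otherwise it falls back to
 *	                 __read_lock_failed
 *	arch_write_lock: subtracts RW_LOCK_BIAS and proceeds only if the
 *	                 result is zero ("jz"), i.e. nobody held it at all,
 *	                 otherwise it falls back to __write_lock_failed
 *	the unlock paths simply add the same amount back
 */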
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */
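/*
 * Usage sketch (illustrative): these arch_* primitives are not called
 * directly; generic code reaches them through the wrappers declared in
 * <linux/spinlock.h> and <linux/rwlock.h>, e.g. with a hypothetical lock:
 *
 *	DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */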