#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}
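/*
 * Illustrative sketch only, not kernel code: the same "single xadd over
 * both halves" scheme described above, written against C11 <stdatomic.h>
 * so it can be tried in user space.  The type and function names, the
 * 16-bit ticket width and the default seq_cst ordering are assumptions
 * made for this example; the real layout comes from asm/spinlock_types.h
 * and the paravirt slowpath is ignored entirely.
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	struct ticket_lock {
 *		_Atomic uint32_t head_tail;	// head in low 16 bits, tail in high 16 bits
 *	};
 *
 *	static void ticket_lock_acquire(struct ticket_lock *lock)
 *	{
 *		// One atomic add takes a ticket (tail, high half) and returns
 *		// the old value, which also tells us where the head was.
 *		uint32_t old = atomic_fetch_add(&lock->head_tail, 1u << 16);
 *		uint16_t me  = old >> 16;
 *
 *		// Spin until the head (low half) reaches our ticket.
 *		while ((uint16_t)atomic_load(&lock->head_tail) != me)
 *			;
 *	}
 *
 *	static void ticket_lock_release(struct ticket_lock *lock)
 *	{
 *		// Bump only the head half; the compare-exchange loop keeps a
 *		// wrapping head from carrying into the tail (the kernel does
 *		// a narrow 16-bit add on the head field instead).
 *		uint32_t old = atomic_load(&lock->head_tail);
 *		uint32_t new;
 *		do {
 *			new = (old & 0xffff0000u) | ((old + 1) & 0xffffu);
 *		} while (!atomic_compare_exchange_weak(&lock->head_tail, &old, new));
 *	}
 */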
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
		    new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
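/*
 * Worked example of the biased counter (illustrative; the constants come
 * from asm/rwlock.h, and the common configuration where RW_LOCK_BIAS is
 * 0x00100000 is assumed here):
 *
 *   - unlocked:             lock == 0x00100000 (== RW_LOCK_BIAS)
 *   - two readers hold it:  lock == 0x000ffffe; each reader decremented the
 *     counter by one, and a result that is still non-negative means the
 *     read lock was granted.
 *   - a writer holds it:    lock == 0x00000000; the writer subtracted the
 *     whole bias, and only a result of exactly zero (no readers, no other
 *     writer) means the write lock was granted.
 *
 * The counter can only go negative when a writer is holding or trying to
 * take the lock while other CPUs contend, which is why the sign bit acts
 * as the "contended" bit mentioned above.
 */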
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */
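/*
 * Usage note (illustrative): kernel code does not call the arch_* primitives
 * in this header directly.  They sit underneath the generic wrappers in
 * <linux/spinlock.h> and <linux/rwlock.h>, which also provide the
 * IRQ-clearing variants mentioned at the top of this file, e.g.:
 *
 *	static DEFINE_SPINLOCK(my_lock);	// my_lock is a made-up name
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section with local interrupts disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */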