#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
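 *
 * A worked example (illustrative only; the exact field widths depend on
 * TICKET_SHIFT, assumed here to be 8, i.e. one byte per field):
 *
 *   head=3, tail=3 (0x0303): unlocked. A locker's xadd of { .tail = 1 }
 *   returns the old pair (head=3, tail=3) and stores 0x0403; since its
 *   ticket (the old tail, 3) already equals head, it owns the lock.
 *
 *   head=3, tail=5 (0x0503): the lock is held and one other CPU is queued.
 *   A new locker gets ticket 5 and spins, re-reading head, until the two
 *   earlier owners have each bumped head and it reaches 5.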
 */
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
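 *
 * (Illustrative note, a sketch rather than source text: WRITE_LOCK_CMP,
 * RW_LOCK_BIAS and the READ_LOCK_*()/WRITE_LOCK_*() helpers are assumed
 * to be defined elsewhere for this configuration. arch_write_trylock()
 * below subtracts WRITE_LOCK_CMP and succeeds only if the result is zero,
 * i.e. a free lock is one whose write field holds exactly WRITE_LOCK_CMP;
 * this helper checks the same condition without an atomic operation.)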
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */
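
/*
 * Usage sketch (illustrative only, not part of this header): the arch_*
 * primitives above are normally reached through the generic
 * <linux/spinlock.h> and <linux/rwlock.h> wrappers. Assuming those
 * wrappers, the pattern described in the rwlock comment above - readers
 * in interrupt context, writers with interrupts disabled - looks roughly
 * like this (state_lock and the shared state are hypothetical names):
 *
 *	static DEFINE_RWLOCK(state_lock);
 *
 *	// writer, process context: must be irq-safe because readers may
 *	// take the lock from interrupt context
 *	unsigned long flags;
 *	write_lock_irqsave(&state_lock, flags);
 *	// ... update shared state ...
 *	write_unlock_irqrestore(&state_lock, flags);
 *
 *	// reader, interrupt context: a plain, non-irq-safe read lock
 *	read_lock(&state_lock);
 *	// ... read shared state ...
 *	read_unlock(&state_lock);
 */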