#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
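/*
 * Illustrative sketch, not used anywhere in the kernel: the same
 * protocol as __arch_spin_trylock() above, expressed with a generic
 * compare-and-swap (GCC's __atomic builtins) instead of lwarx/stwcx.
 * The _sketch name is ours, purely for exposition.  The lock word is
 * CASed from 0 (unlocked) to LOCK_TOKEN, which identifies the owning
 * CPU; the return convention matches the asm version: the old lock
 * value, so 0 means we took the lock.
 */
static inline unsigned long __arch_spin_trylock_sketch(arch_spinlock_t *lock)
{
	unsigned int old = 0;

	/* ACQUIRE ordering on success mirrors PPC_ACQUIRE_BARRIER above. */
	if (__atomic_compare_exchange_n(&lock->slock, &old, LOCK_TOKEN, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;		/* old value was 0: lock acquired */

	return old;			/* non-zero: the holder's token */
}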
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks (there is a usage sketch at the end of this
 * file).
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
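/*
 * Illustrative sketch, not used anywhere in the kernel: the reader
 * counting protocol of __arch_read_trylock() above in generic C (again
 * with GCC's __atomic builtins; the _sketch name is ours).  The lock
 * word is a signed count: zero means unlocked, each reader increments
 * it, and a writer parks it at the negative WRLOCK_TOKEN, so the
 * increment only succeeds while no writer is present.  The return
 * convention matches the asm version: old value + 1, positive on
 * success.
 */
static inline long __arch_read_trylock_sketch(arch_rwlock_t *rw)
{
	int old, newval;

	old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
	do {
		newval = old + 1;
		if (newval <= 0)	/* old < 0: a writer holds the lock */
			return newval;	/* <= 0, like the asm's "ble- 2f" */
	} while (!__atomic_compare_exchange_n(&rw->lock, &old, newval, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));

	return newval;			/* > 0: we hold a read lock */
}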
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
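/*
 * Usage sketch for the NOTE above about mixing irq-safe and
 * non-irq-safe rwlock users, written against the generic rwlock API
 * rather than the arch_* primitives in this file.  Kept under #if 0
 * because it is purely illustrative; example_lock, example_data,
 * example_read() and example_write() are made-up names.
 */
#if 0
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static int example_data;

/* Readers may run in interrupt context with the plain read lock. */
static int example_read(void)
{
	int val;

	read_lock(&example_lock);
	val = example_data;
	read_unlock(&example_lock);
	return val;
}

/*
 * The writer must disable interrupts: if an interrupt taking the
 * read lock arrived while this CPU held the write lock, the reader
 * would spin forever against its own CPU.
 */
static void example_write(int val)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	example_data = val;
	write_unlock_irqrestore(&example_lock, flags);
}
#endif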