#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int-sized lock word is used, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
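/*
 * A minimal sketch of the lock-word convention implemented above; this
 * is illustrative only and assumes the arch_spinlock_t definition from
 * asm/spinlock_types.h:
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;	// l.slock == 0
 *
 *	__arch_spin_trylock(&l);	// returns 0: lock was free, l.slock
 *					// now holds our token (0x800000yy
 *					// on ppc64, 1 on ppc32)
 *	__arch_spin_trylock(&l);	// returns the holder's token, i.e.
 *					// non-zero: lock not acquired
 */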
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
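/*
 * For illustration only: kernel code does not call the arch_spin_*
 * hooks directly but goes through the generic wrappers declared in
 * include/linux/spinlock.h, which end up here.  A minimal caller-side
 * sketch (the lock name below is just an example):
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	unsigned long flags;
 *
 *	spin_lock(&example_lock);
 *	// critical section: the lock word holds this CPU's token
 *	spin_unlock(&example_lock);
 *
 *	// irq-safe variant, which can route through arch_spin_lock_flags()
 *	spin_lock_irqsave(&example_lock, flags);
 *	spin_unlock_irqrestore(&example_lock, flags);
 */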
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */