#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#define arch_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
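/*
 * Illustrative sketch (not part of the original header): the lwarx/stwcx.
 * loop in __arch_spin_trylock() above behaves roughly like an atomic
 * compare-and-swap of 0 -> LOCK_TOKEN with acquire ordering on the
 * success path.  The names below are hypothetical and use the GCC
 * __atomic builtins purely to clarify the semantics; returning 0 means
 * the lock was taken, any other value is the current holder's token:
 *
 *	static unsigned int example_spin_trylock(unsigned int *slock,
 *						 unsigned int token)
 *	{
 *		unsigned int old = 0;
 *
 *		if (__atomic_compare_exchange_n(slock, &old, token, false,
 *						__ATOMIC_ACQUIRE,
 *						__ATOMIC_RELAXED))
 *			return 0;
 *		return old;
 *	}
 */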
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
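/*
 * Illustrative sketch (not part of the original header): the rwlock word
 * counts readers when positive and holds the negative writer token when
 * write-locked, so __arch_read_trylock() above is roughly "increment the
 * word if the result stays positive, retrying only on reservation loss".
 * A hypothetical equivalent using the GCC __atomic builtins, returning
 * old + 1 exactly as the asm does (> 0 means the read lock was taken):
 *
 *	static long example_read_trylock(int *lock)
 *	{
 *		int old = __atomic_load_n(lock, __ATOMIC_RELAXED);
 *
 *		while (old + 1 > 0) {
 *			if (__atomic_compare_exchange_n(lock, &old, old + 1,
 *							false,
 *							__ATOMIC_ACQUIRE,
 *							__ATOMIC_RELAXED))
 *				break;
 *		}
 *		return old + 1;
 *	}
 */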
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
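/*
 * Illustrative usage sketch for the NOTE in the read-write lock comment
 * above (names are hypothetical; drivers call the generic read_lock()
 * and write_lock_irqsave() wrappers layered on top of the primitives in
 * this file).  A reader running in interrupt context can take the
 * non-irq-safe read lock, while a writer running in process context must
 * use the irq-safe variant so an interrupt reader on the same CPU cannot
 * deadlock against it:
 *
 *	static DEFINE_RWLOCK(example_lock);
 *	static int example_count;
 *
 *	static void example_irq_reader(void)
 *	{
 *		read_lock(&example_lock);
 *		(void)example_count;
 *		read_unlock(&example_lock);
 *	}
 *
 *	static void example_writer(void)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&example_lock, flags);
 *		example_count++;
 *		write_unlock_irqrestore(&example_lock, flags);
 *	}
 */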