#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
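
/*
 * Informal illustration of the encoding described above (values are
 * examples only): the lock word reads
 *
 *	0x00000000		unlocked
 *	0x80000000 | cpu	held by (virtual) CPU "cpu",
 *				e.g. 0x80000005 for CPU 5
 *
 * so a yield helper can recover the holder's CPU number from the lock
 * word alone and donate the rest of our timeslice to that processor.
 */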

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
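
/*
 * Informal usage sketch of the "mixed" pattern above, using the
 * generic kernel rwlock API rather than the arch_* entry points in
 * this header (my_rwlock is a made-up example lock, and "flags" is an
 * unsigned long in the caller):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	writer (process context, must be irq-safe):
 *		write_lock_irqsave(&my_rwlock, flags);
 *		... update the data ...
 *		write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	reader (may run in interrupt context, a plain read lock suffices):
 *		read_lock(&my_rwlock);
 *		... read the data ...
 *		read_unlock(&my_rwlock);
 */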

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */