spinlock.h, fb3a6bbc912b12347614e5742c7c61416cdb0ca0 → e5931943d02bf751b1ec849c0d2ade23d76a8d41. The change is a pure rename: the `__raw_*` rwlock operations become `arch_*`, and the internal trylock helpers become `__arch_*`; the function bodies are untouched.

```diff
--- spinlock.h (fb3a6bbc912b12347614e5742c7c61416cdb0ca0)
+++ spinlock.h (e5931943d02bf751b1ec849c0d2ade23d76a8d41)
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 #ifdef __KERNEL__
 
 /*
  * Simple spin lock operations.
  *
  * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
@@ 152 unchanged lines hidden @@
  *
  * NOTE! it is quite common to have readers in interrupts
  * but no interrupt writers. For those circumstances we
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
 
```
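The NOTE above describes a usage pattern worth making concrete. Here is a minimal sketch using the generic kernel rwlock API (the caller-facing layer above these arch hooks); `tbl_lock` and the two functions are hypothetical names for illustration:

```c
#include <linux/spinlock.h>

static DEFINE_RWLOCK(tbl_lock);	/* hypothetical lock protecting a table */

/* Writers never run in interrupt context here, but they must still
 * exclude interrupt readers on this CPU, so they take the irq-safe form. */
static void table_update(void)
{
	unsigned long flags;

	write_lock_irqsave(&tbl_lock, flags);
	/* ... modify the table ... */
	write_unlock_irqrestore(&tbl_lock, flags);
}

/* Readers may run in interrupt context; since no writer ever runs in
 * an interrupt, the non-irqsafe read lock cannot deadlock. */
static void table_lookup(void)
{
	read_lock(&tbl_lock);
	/* ... read the table ... */
	read_unlock(&tbl_lock);
}
```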
```diff
-#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)		((rw)->lock >= 0)
+#define arch_write_can_lock(rw)		(!(rw)->lock)
 
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
 #define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
 #else
 #define __DO_SIGN_EXTEND
 #define WRLOCK_TOKEN		(-1)
 #endif
 
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
```
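The two `can_lock` tests above encode the whole lock-word convention: zero means free, a positive value counts the current readers, and a negative value (WRLOCK_TOKEN) marks a writer. A plain-C restatement of that convention, with hypothetical names, purely for illustration:

```c
#include <stdbool.h>

/* 0 = unlocked, n > 0 = n readers hold it, < 0 = a writer holds it */
typedef struct { volatile int lock; } rwlock_word;

static bool read_can_lock(const rwlock_word *rw)
{
	return rw->lock >= 0;	/* free, or only readers: one more may join */
}

static bool write_can_lock(const rwlock_word *rw)
{
	return rw->lock == 0;	/* a writer needs the word completely free */
}
```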
```diff
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
 
 	__asm__ __volatile__(
 "1:	lwarx		%0,0,%1\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ 7 unchanged lines hidden @@
 
 	return tmp;
 }
 
 /*
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
 "1:	lwarx		%0,0,%2\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.		%1,0,%2\n\
 	bne-		1b\n\
 	isync\n\
 2:"	: "=&r" (tmp)
 	: "r" (token), "r" (&rw->lock)
 	: "cr0", "memory");
 
 	return tmp;
 }
 
```
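For readers who don't speak PPC assembler, here is a hedged portable rendering of what the two lwarx/stwcx. loops compute, using GCC's `__atomic` builtins in place of the load-reserve/store-conditional pair; the trailing `isync` (an acquire barrier on success) is approximated with `__ATOMIC_ACQUIRE`, and all names are hypothetical:

```c
/* Sketch of __arch_read_trylock: returns old value + 1, so > 0 on success. */
static long read_trylock_sketch(int *lock)
{
	int oldval = __atomic_load_n(lock, __ATOMIC_RELAXED);
	int newval;

	do {
		newval = oldval + 1;	/* addic. %0,%0,1 */
		if (newval <= 0)	/* ble- 2f: a writer holds it */
			return newval;
	} while (!__atomic_compare_exchange_n(lock, &oldval, newval, true,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return newval;			/* > 0: read lock taken */
}

/* Sketch of __arch_write_trylock: returns the old value, so 0 on success. */
static long write_trylock_sketch(int *lock, int wrlock_token)
{
	int expected = 0;		/* cmpwi 0,%0,0: must be free */

	if (__atomic_compare_exchange_n(lock, &expected, wrlock_token, false,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;		/* write lock taken */
	return expected;		/* non-zero old value: failed */
}
```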
```diff
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_read_trylock(rw) > 0))
+		if (likely(__arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__rw_yield(rw);
 		} while (unlikely(rw->lock < 0));
 		HMT_medium();
 	}
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_write_trylock(rw) == 0))
+		if (likely(__arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__rw_yield(rw);
 		} while (unlikely(rw->lock != 0));
 		HMT_medium();
 	}
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock(rw) > 0;
+	return __arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock(rw) == 0;
+	return __arch_write_trylock(rw) == 0;
 }
 
```
```diff
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	long tmp;
 
 	__asm__ __volatile__(
 	"# read_unlock\n\t"
 	LWSYNC_ON_SMP
 "1:	lwarx		%0,0,%1\n\
 	addic		%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.		%0,0,%1\n\
 	bne-		1b"
 	: "=&r"(tmp)
 	: "r"(&rw->lock)
 	: "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__("# write_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	rw->lock = 0;
 }
 
```
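The asymmetry between the two unlock paths is worth noting: read_unlock must be an atomic read-modify-write (lwarx/stwcx.) because other readers may decrement the same word concurrently, while write_unlock can be a plain store behind an lwsync, since the writer owns the word exclusively. In C11 atomics the same pair would look roughly like this (hypothetical names, not the kernel API):

```c
#include <stdatomic.h>

static void read_unlock_sketch(atomic_int *lock)
{
	/* Concurrent readers race on the count, so use an atomic RMW;
	 * release ordering plays the role of LWSYNC_ON_SMP. */
	atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}

static void write_unlock_sketch(atomic_int *lock)
{
	/* The writer is the sole owner: a release store is enough. */
	atomic_store_explicit(lock, 0, memory_order_release);
}
```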
```diff
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)	__spin_yield(lock)
 #define arch_read_relax(lock)	__rw_yield(lock)
 #define arch_write_relax(lock)	__rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
```