#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

/* The lock word is non-zero whenever the lock is held. */
#define arch_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

/*
 * On 64-bit SMP, the paca keeps an io_sync flag that is set after MMIO
 * accesses.  CLEAR_IO_SYNC is done when taking a lock; SYNC_IO is done
 * when releasing it, and issues a full mb() only if MMIO happened while
 * the lock was held, so that the I/O is ordered before the lock release.
 */
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
/*
 * Atomic trylock primitive: lwarx loads the lock word with a
 * reservation; if it is 0 (unlocked), stwcx. conditionally stores
 * LOCK_TOKEN into it, retrying if the reservation was lost.  The
 * isync after a successful store keeps critical-section accesses
 * from being executed ahead of the acquisition (acquire semantics).
 * Returns the old lock value: 0 means we now hold the lock.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

/*
 * Public trylock: returns non-zero on success (old lock value was 0).
 * Clears the paca io_sync flag so a later unlock need not mb() for
 * MMIO done before the lock was taken.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.
 * Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
/* No hypervisor: yielding degenerates to a compiler barrier. */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

/*
 * Acquire the lock, spinning until it is free.  While waiting we drop
 * to low hardware-thread priority (HMT_low) and, on shared-processor
 * systems, yield the rest of our timeslice to the lock holder; we only
 * retry the atomic trylock once the lock word reads as 0 again.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

/*
 * As arch_spin_lock(), but the caller passes the interrupt state
 * (flags) it saved before disabling interrupts.  While busy-waiting we
 * restore that state - typically re-enabling interrupts - so pending
 * interrupts can be serviced, then disable again (restore flags_dis)
 * before retrying the acquisition.
 */
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

/*
 * Release the lock: order any pending MMIO (SYNC_IO), emit a release
 * barrier (LWSYNC_ON_SMP expands to lwsync on SMP builds), then a
 * plain store of 0 makes the lock available again.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
/* PPC64 version (out of line) can also yield to the lock holder. */
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * The rwlock word is a signed reader count: > 0 means that many
 * readers hold it, 0 means free, negative (the write token) means
 * write-locked.
 */
#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
/* lwarx zero-extends to 64 bits, so sign-extend before the signed test */
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 *
 * lwarx/stwcx. atomically increment the reader count; addic. sets
 * cr0 so ble- bails out if the result is <= 0 (write-locked, since
 * the write token is negative).  PPC405_ERR77 inserts the workaround
 * for PPC405 erratum #77 before the stwcx.; "xer" is clobbered
 * because addic/addic. use the carry bit.
 */
static inline long arch_read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
/*
 * Atomically store the (negative) WRLOCK_TOKEN if the lock word is 0,
 * i.e. no readers and no writer.  Same lwarx/stwcx. + isync acquire
 * pattern as __arch_spin_trylock.
 */
static inline long arch_write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

/*
 * Take a read lock, spinning (at low hardware-thread priority, and
 * yielding to the hypervisor on shared processors) while the count is
 * negative, i.e. while a writer holds the lock.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

/*
 * Take the write lock, spinning while the lock word is non-zero,
 * i.e. while any reader or writer holds it.
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

/* Non-blocking variants: non-zero return means the lock was taken. */
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	return arch_read_trylock(rw) > 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	return arch_write_trylock(rw) == 0;
}

/*
 * Drop a read lock: release barrier (lwsync on SMP), then atomically
 * decrement the reader count with a lwarx/stwcx. retry loop.
 */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

/*
 * Drop the write lock: release barrier, then a plain store of 0
 * (no atomic loop needed - we are the sole holder).
 */
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

/* The irq-flags variants gain nothing here; just take the lock. */
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

/* cpu_relax()-style hooks: yield to the holder while contending. */
#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */