1b8b572e1SStephen Rothwell #ifndef __ASM_SPINLOCK_H 2b8b572e1SStephen Rothwell #define __ASM_SPINLOCK_H 3b8b572e1SStephen Rothwell #ifdef __KERNEL__ 4b8b572e1SStephen Rothwell 5b8b572e1SStephen Rothwell /* 6b8b572e1SStephen Rothwell * Simple spin lock operations. 7b8b572e1SStephen Rothwell * 8b8b572e1SStephen Rothwell * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM 9b8b572e1SStephen Rothwell * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 10b8b572e1SStephen Rothwell * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM 11b8b572e1SStephen Rothwell * Rework to support virtual processors 12b8b572e1SStephen Rothwell * 13b8b572e1SStephen Rothwell * Type of int is used as a full 64b word is not necessary. 14b8b572e1SStephen Rothwell * 15b8b572e1SStephen Rothwell * This program is free software; you can redistribute it and/or 16b8b572e1SStephen Rothwell * modify it under the terms of the GNU General Public License 17b8b572e1SStephen Rothwell * as published by the Free Software Foundation; either version 18b8b572e1SStephen Rothwell * 2 of the License, or (at your option) any later version. 19b8b572e1SStephen Rothwell * 20b8b572e1SStephen Rothwell * (the type definitions are in asm/spinlock_types.h) 21b8b572e1SStephen Rothwell */ 22b8b572e1SStephen Rothwell #include <linux/irqflags.h> 23b8b572e1SStephen Rothwell #ifdef CONFIG_PPC64 24b8b572e1SStephen Rothwell #include <asm/paca.h> 25b8b572e1SStephen Rothwell #include <asm/hvcall.h> 26b8b572e1SStephen Rothwell #endif 27b8b572e1SStephen Rothwell #include <asm/asm-compat.h> 28b8b572e1SStephen Rothwell #include <asm/synch.h> 294e14a4d1SAnton Blanchard #include <asm/ppc-opcode.h> 30b8b572e1SStephen Rothwell 31b8b572e1SStephen Rothwell #ifdef CONFIG_PPC64 32b8b572e1SStephen Rothwell /* use 0x800000yy when locked, where yy == CPU number */ 3354bb7f4bSAnton Blanchard #ifdef __BIG_ENDIAN__ 34b8b572e1SStephen Rothwell #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) 35b8b572e1SStephen Rothwell #else 3654bb7f4bSAnton Blanchard #define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index)) 3754bb7f4bSAnton Blanchard #endif 3854bb7f4bSAnton Blanchard #else 39b8b572e1SStephen Rothwell #define LOCK_TOKEN 1 40b8b572e1SStephen Rothwell #endif 41b8b572e1SStephen Rothwell 42b8b572e1SStephen Rothwell #if defined(CONFIG_PPC64) && defined(CONFIG_SMP) 43b8b572e1SStephen Rothwell #define CLEAR_IO_SYNC (get_paca()->io_sync = 0) 44b8b572e1SStephen Rothwell #define SYNC_IO do { \ 45b8b572e1SStephen Rothwell if (unlikely(get_paca()->io_sync)) { \ 46b8b572e1SStephen Rothwell mb(); \ 47b8b572e1SStephen Rothwell get_paca()->io_sync = 0; \ 48b8b572e1SStephen Rothwell } \ 49b8b572e1SStephen Rothwell } while (0) 50b8b572e1SStephen Rothwell #else 51b8b572e1SStephen Rothwell #define CLEAR_IO_SYNC 52b8b572e1SStephen Rothwell #define SYNC_IO 53b8b572e1SStephen Rothwell #endif 54b8b572e1SStephen Rothwell 5541946c86SPan Xinhui #ifdef CONFIG_PPC_PSERIES 5641946c86SPan Xinhui #define vcpu_is_preempted vcpu_is_preempted 5741946c86SPan Xinhui static inline bool vcpu_is_preempted(int cpu) 5841946c86SPan Xinhui { 5941946c86SPan Xinhui return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); 6041946c86SPan Xinhui } 6141946c86SPan Xinhui #endif 6241946c86SPan Xinhui 633405d230SMichael Ellerman static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) 643405d230SMichael Ellerman { 653405d230SMichael Ellerman return lock.slock == 0; 663405d230SMichael Ellerman } 673405d230SMichael Ellerman 687179ba52SMichael Ellerman static inline int arch_spin_is_locked(arch_spinlock_t *lock) 697179ba52SMichael Ellerman { 7051d7d520SMichael Ellerman smp_mb(); 717179ba52SMichael Ellerman return !arch_spin_value_unlocked(*lock); 727179ba52SMichael Ellerman } 737179ba52SMichael Ellerman 74b8b572e1SStephen Rothwell /* 75b8b572e1SStephen Rothwell * This returns the old value in the lock, so we succeeded 76b8b572e1SStephen Rothwell * in getting the lock if the return value is 0. 77b8b572e1SStephen Rothwell */ 780199c4e6SThomas Gleixner static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) 79b8b572e1SStephen Rothwell { 80b8b572e1SStephen Rothwell unsigned long tmp, token; 81b8b572e1SStephen Rothwell 82b8b572e1SStephen Rothwell token = LOCK_TOKEN; 83b8b572e1SStephen Rothwell __asm__ __volatile__( 844e14a4d1SAnton Blanchard "1: " PPC_LWARX(%0,0,%2,1) "\n\ 85b8b572e1SStephen Rothwell cmpwi 0,%0,0\n\ 86b8b572e1SStephen Rothwell bne- 2f\n\ 87b8b572e1SStephen Rothwell stwcx. %1,0,%2\n\ 88f10e2e5bSAnton Blanchard bne- 1b\n" 89f10e2e5bSAnton Blanchard PPC_ACQUIRE_BARRIER 90f10e2e5bSAnton Blanchard "2:" 91f10e2e5bSAnton Blanchard : "=&r" (tmp) 92b8b572e1SStephen Rothwell : "r" (token), "r" (&lock->slock) 93b8b572e1SStephen Rothwell : "cr0", "memory"); 94b8b572e1SStephen Rothwell 95b8b572e1SStephen Rothwell return tmp; 96b8b572e1SStephen Rothwell } 97b8b572e1SStephen Rothwell 980199c4e6SThomas Gleixner static inline int arch_spin_trylock(arch_spinlock_t *lock) 99b8b572e1SStephen Rothwell { 100b8b572e1SStephen Rothwell CLEAR_IO_SYNC; 1010199c4e6SThomas Gleixner return __arch_spin_trylock(lock) == 0; 102b8b572e1SStephen Rothwell } 103b8b572e1SStephen Rothwell 104b8b572e1SStephen Rothwell /* 105b8b572e1SStephen Rothwell * On a system with shared processors (that is, where a physical 106b8b572e1SStephen Rothwell * processor is multiplexed between several virtual processors), 107b8b572e1SStephen Rothwell * there is no point spinning on a lock if the holder of the lock 108b8b572e1SStephen Rothwell * isn't currently scheduled on a physical processor. Instead 109b8b572e1SStephen Rothwell * we detect this situation and ask the hypervisor to give the 110b8b572e1SStephen Rothwell * rest of our timeslice to the lock holder. 111b8b572e1SStephen Rothwell * 112b8b572e1SStephen Rothwell * So that we can tell which virtual processor is holding a lock, 113b8b572e1SStephen Rothwell * we put 0x80000000 | smp_processor_id() in the lock when it is 114b8b572e1SStephen Rothwell * held. Conveniently, we have a word in the paca that holds this 115b8b572e1SStephen Rothwell * value. 116b8b572e1SStephen Rothwell */ 117b8b572e1SStephen Rothwell 1181b041885SStephen Rothwell #if defined(CONFIG_PPC_SPLPAR) 119b8b572e1SStephen Rothwell /* We only yield to the hypervisor if we are in shared processor mode */ 120f13c13a0SAnton Blanchard #define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) 121445c8951SThomas Gleixner extern void __spin_yield(arch_spinlock_t *lock); 122fb3a6bbcSThomas Gleixner extern void __rw_yield(arch_rwlock_t *lock); 1231b041885SStephen Rothwell #else /* SPLPAR */ 124b8b572e1SStephen Rothwell #define __spin_yield(x) barrier() 125b8b572e1SStephen Rothwell #define __rw_yield(x) barrier() 126b8b572e1SStephen Rothwell #define SHARED_PROCESSOR 0 127b8b572e1SStephen Rothwell #endif 128b8b572e1SStephen Rothwell 1290199c4e6SThomas Gleixner static inline void arch_spin_lock(arch_spinlock_t *lock) 130b8b572e1SStephen Rothwell { 131b8b572e1SStephen Rothwell CLEAR_IO_SYNC; 132b8b572e1SStephen Rothwell while (1) { 1330199c4e6SThomas Gleixner if (likely(__arch_spin_trylock(lock) == 0)) 134b8b572e1SStephen Rothwell break; 135b8b572e1SStephen Rothwell do { 136b8b572e1SStephen Rothwell HMT_low(); 137b8b572e1SStephen Rothwell if (SHARED_PROCESSOR) 138b8b572e1SStephen Rothwell __spin_yield(lock); 139b8b572e1SStephen Rothwell } while (unlikely(lock->slock != 0)); 140b8b572e1SStephen Rothwell HMT_medium(); 141b8b572e1SStephen Rothwell } 142b8b572e1SStephen Rothwell } 143b8b572e1SStephen Rothwell 144b8b572e1SStephen Rothwell static inline 1450199c4e6SThomas Gleixner void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 146b8b572e1SStephen Rothwell { 147b8b572e1SStephen Rothwell unsigned long flags_dis; 148b8b572e1SStephen Rothwell 149b8b572e1SStephen Rothwell CLEAR_IO_SYNC; 150b8b572e1SStephen Rothwell while (1) { 1510199c4e6SThomas Gleixner if (likely(__arch_spin_trylock(lock) == 0)) 152b8b572e1SStephen Rothwell break; 153b8b572e1SStephen Rothwell local_save_flags(flags_dis); 154b8b572e1SStephen Rothwell local_irq_restore(flags); 155b8b572e1SStephen Rothwell do { 156b8b572e1SStephen Rothwell HMT_low(); 157b8b572e1SStephen Rothwell if (SHARED_PROCESSOR) 158b8b572e1SStephen Rothwell __spin_yield(lock); 159b8b572e1SStephen Rothwell } while (unlikely(lock->slock != 0)); 160b8b572e1SStephen Rothwell HMT_medium(); 161b8b572e1SStephen Rothwell local_irq_restore(flags_dis); 162b8b572e1SStephen Rothwell } 163b8b572e1SStephen Rothwell } 164b8b572e1SStephen Rothwell 1650199c4e6SThomas Gleixner static inline void arch_spin_unlock(arch_spinlock_t *lock) 166b8b572e1SStephen Rothwell { 167b8b572e1SStephen Rothwell SYNC_IO; 1680199c4e6SThomas Gleixner __asm__ __volatile__("# arch_spin_unlock\n\t" 169f10e2e5bSAnton Blanchard PPC_RELEASE_BARRIER: : :"memory"); 170b8b572e1SStephen Rothwell lock->slock = 0; 171b8b572e1SStephen Rothwell } 172b8b572e1SStephen Rothwell 1736262db7cSBoqun Feng static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 1746262db7cSBoqun Feng { 1756262db7cSBoqun Feng arch_spinlock_t lock_val; 1766262db7cSBoqun Feng 1776262db7cSBoqun Feng smp_mb(); 1786262db7cSBoqun Feng 1796262db7cSBoqun Feng /* 1806262db7cSBoqun Feng * Atomically load and store back the lock value (unchanged). This 1816262db7cSBoqun Feng * ensures that our observation of the lock value is ordered with 1826262db7cSBoqun Feng * respect to other lock operations. 1836262db7cSBoqun Feng */ 1846262db7cSBoqun Feng __asm__ __volatile__( 1856262db7cSBoqun Feng "1: " PPC_LWARX(%0, 0, %2, 0) "\n" 1866262db7cSBoqun Feng " stwcx. %0, 0, %2\n" 1876262db7cSBoqun Feng " bne- 1b\n" 1886262db7cSBoqun Feng : "=&r" (lock_val), "+m" (*lock) 1896262db7cSBoqun Feng : "r" (lock) 1906262db7cSBoqun Feng : "cr0", "xer"); 1916262db7cSBoqun Feng 1926262db7cSBoqun Feng if (arch_spin_value_unlocked(lock_val)) 1936262db7cSBoqun Feng goto out; 1946262db7cSBoqun Feng 1956262db7cSBoqun Feng while (lock->slock) { 1966262db7cSBoqun Feng HMT_low(); 1976262db7cSBoqun Feng if (SHARED_PROCESSOR) 1986262db7cSBoqun Feng __spin_yield(lock); 1996262db7cSBoqun Feng } 2006262db7cSBoqun Feng HMT_medium(); 2016262db7cSBoqun Feng 2026262db7cSBoqun Feng out: 2036262db7cSBoqun Feng smp_mb(); 2046262db7cSBoqun Feng } 205b8b572e1SStephen Rothwell 206b8b572e1SStephen Rothwell /* 207b8b572e1SStephen Rothwell * Read-write spinlocks, allowing multiple readers 208b8b572e1SStephen Rothwell * but only one writer. 209b8b572e1SStephen Rothwell * 210b8b572e1SStephen Rothwell * NOTE! it is quite common to have readers in interrupts 211b8b572e1SStephen Rothwell * but no interrupt writers. For those circumstances we 212b8b572e1SStephen Rothwell * can "mix" irq-safe locks - any writer needs to get a 213b8b572e1SStephen Rothwell * irq-safe write-lock, but readers can get non-irqsafe 214b8b572e1SStephen Rothwell * read-locks. 215b8b572e1SStephen Rothwell */ 216b8b572e1SStephen Rothwell 217e5931943SThomas Gleixner #define arch_read_can_lock(rw) ((rw)->lock >= 0) 218e5931943SThomas Gleixner #define arch_write_can_lock(rw) (!(rw)->lock) 219b8b572e1SStephen Rothwell 220b8b572e1SStephen Rothwell #ifdef CONFIG_PPC64 221b8b572e1SStephen Rothwell #define __DO_SIGN_EXTEND "extsw %0,%0\n" 222b8b572e1SStephen Rothwell #define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ 223b8b572e1SStephen Rothwell #else 224b8b572e1SStephen Rothwell #define __DO_SIGN_EXTEND 225b8b572e1SStephen Rothwell #define WRLOCK_TOKEN (-1) 226b8b572e1SStephen Rothwell #endif 227b8b572e1SStephen Rothwell 228b8b572e1SStephen Rothwell /* 229b8b572e1SStephen Rothwell * This returns the old value in the lock + 1, 230b8b572e1SStephen Rothwell * so we got a read lock if the return value is > 0. 231b8b572e1SStephen Rothwell */ 232e5931943SThomas Gleixner static inline long __arch_read_trylock(arch_rwlock_t *rw) 233b8b572e1SStephen Rothwell { 234b8b572e1SStephen Rothwell long tmp; 235b8b572e1SStephen Rothwell 236b8b572e1SStephen Rothwell __asm__ __volatile__( 2374e14a4d1SAnton Blanchard "1: " PPC_LWARX(%0,0,%1,1) "\n" 238b8b572e1SStephen Rothwell __DO_SIGN_EXTEND 239b8b572e1SStephen Rothwell " addic. %0,%0,1\n\ 240b8b572e1SStephen Rothwell ble- 2f\n" 241b8b572e1SStephen Rothwell PPC405_ERR77(0,%1) 242b8b572e1SStephen Rothwell " stwcx. %0,0,%1\n\ 243f10e2e5bSAnton Blanchard bne- 1b\n" 244f10e2e5bSAnton Blanchard PPC_ACQUIRE_BARRIER 245f10e2e5bSAnton Blanchard "2:" : "=&r" (tmp) 246b8b572e1SStephen Rothwell : "r" (&rw->lock) 247b8b572e1SStephen Rothwell : "cr0", "xer", "memory"); 248b8b572e1SStephen Rothwell 249b8b572e1SStephen Rothwell return tmp; 250b8b572e1SStephen Rothwell } 251b8b572e1SStephen Rothwell 252b8b572e1SStephen Rothwell /* 253b8b572e1SStephen Rothwell * This returns the old value in the lock, 254b8b572e1SStephen Rothwell * so we got the write lock if the return value is 0. 255b8b572e1SStephen Rothwell */ 256e5931943SThomas Gleixner static inline long __arch_write_trylock(arch_rwlock_t *rw) 257b8b572e1SStephen Rothwell { 258b8b572e1SStephen Rothwell long tmp, token; 259b8b572e1SStephen Rothwell 260b8b572e1SStephen Rothwell token = WRLOCK_TOKEN; 261b8b572e1SStephen Rothwell __asm__ __volatile__( 2624e14a4d1SAnton Blanchard "1: " PPC_LWARX(%0,0,%2,1) "\n\ 263b8b572e1SStephen Rothwell cmpwi 0,%0,0\n\ 264b8b572e1SStephen Rothwell bne- 2f\n" 265b8b572e1SStephen Rothwell PPC405_ERR77(0,%1) 266b8b572e1SStephen Rothwell " stwcx. %1,0,%2\n\ 267f10e2e5bSAnton Blanchard bne- 1b\n" 268f10e2e5bSAnton Blanchard PPC_ACQUIRE_BARRIER 269f10e2e5bSAnton Blanchard "2:" : "=&r" (tmp) 270b8b572e1SStephen Rothwell : "r" (token), "r" (&rw->lock) 271b8b572e1SStephen Rothwell : "cr0", "memory"); 272b8b572e1SStephen Rothwell 273b8b572e1SStephen Rothwell return tmp; 274b8b572e1SStephen Rothwell } 275b8b572e1SStephen Rothwell 276e5931943SThomas Gleixner static inline void arch_read_lock(arch_rwlock_t *rw) 277b8b572e1SStephen Rothwell { 278b8b572e1SStephen Rothwell while (1) { 279e5931943SThomas Gleixner if (likely(__arch_read_trylock(rw) > 0)) 280b8b572e1SStephen Rothwell break; 281b8b572e1SStephen Rothwell do { 282b8b572e1SStephen Rothwell HMT_low(); 283b8b572e1SStephen Rothwell if (SHARED_PROCESSOR) 284b8b572e1SStephen Rothwell __rw_yield(rw); 285b8b572e1SStephen Rothwell } while (unlikely(rw->lock < 0)); 286b8b572e1SStephen Rothwell HMT_medium(); 287b8b572e1SStephen Rothwell } 288b8b572e1SStephen Rothwell } 289b8b572e1SStephen Rothwell 290e5931943SThomas Gleixner static inline void arch_write_lock(arch_rwlock_t *rw) 291b8b572e1SStephen Rothwell { 292b8b572e1SStephen Rothwell while (1) { 293e5931943SThomas Gleixner if (likely(__arch_write_trylock(rw) == 0)) 294b8b572e1SStephen Rothwell break; 295b8b572e1SStephen Rothwell do { 296b8b572e1SStephen Rothwell HMT_low(); 297b8b572e1SStephen Rothwell if (SHARED_PROCESSOR) 298b8b572e1SStephen Rothwell __rw_yield(rw); 299b8b572e1SStephen Rothwell } while (unlikely(rw->lock != 0)); 300b8b572e1SStephen Rothwell HMT_medium(); 301b8b572e1SStephen Rothwell } 302b8b572e1SStephen Rothwell } 303b8b572e1SStephen Rothwell 304e5931943SThomas Gleixner static inline int arch_read_trylock(arch_rwlock_t *rw) 305b8b572e1SStephen Rothwell { 306e5931943SThomas Gleixner return __arch_read_trylock(rw) > 0; 307b8b572e1SStephen Rothwell } 308b8b572e1SStephen Rothwell 309e5931943SThomas Gleixner static inline int arch_write_trylock(arch_rwlock_t *rw) 310b8b572e1SStephen Rothwell { 311e5931943SThomas Gleixner return __arch_write_trylock(rw) == 0; 312b8b572e1SStephen Rothwell } 313b8b572e1SStephen Rothwell 314e5931943SThomas Gleixner static inline void arch_read_unlock(arch_rwlock_t *rw) 315b8b572e1SStephen Rothwell { 316b8b572e1SStephen Rothwell long tmp; 317b8b572e1SStephen Rothwell 318b8b572e1SStephen Rothwell __asm__ __volatile__( 319b8b572e1SStephen Rothwell "# read_unlock\n\t" 320f10e2e5bSAnton Blanchard PPC_RELEASE_BARRIER 321b8b572e1SStephen Rothwell "1: lwarx %0,0,%1\n\ 322b8b572e1SStephen Rothwell addic %0,%0,-1\n" 323b8b572e1SStephen Rothwell PPC405_ERR77(0,%1) 324b8b572e1SStephen Rothwell " stwcx. %0,0,%1\n\ 325b8b572e1SStephen Rothwell bne- 1b" 326b8b572e1SStephen Rothwell : "=&r"(tmp) 327b8b572e1SStephen Rothwell : "r"(&rw->lock) 328efc3624cSPaul Mackerras : "cr0", "xer", "memory"); 329b8b572e1SStephen Rothwell } 330b8b572e1SStephen Rothwell 331e5931943SThomas Gleixner static inline void arch_write_unlock(arch_rwlock_t *rw) 332b8b572e1SStephen Rothwell { 333b8b572e1SStephen Rothwell __asm__ __volatile__("# write_unlock\n\t" 334f10e2e5bSAnton Blanchard PPC_RELEASE_BARRIER: : :"memory"); 335b8b572e1SStephen Rothwell rw->lock = 0; 336b8b572e1SStephen Rothwell } 337b8b572e1SStephen Rothwell 338e5931943SThomas Gleixner #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 339e5931943SThomas Gleixner #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 340f5f7eac4SRobin Holt 3410199c4e6SThomas Gleixner #define arch_spin_relax(lock) __spin_yield(lock) 3420199c4e6SThomas Gleixner #define arch_read_relax(lock) __rw_yield(lock) 3430199c4e6SThomas Gleixner #define arch_write_relax(lock) __rw_yield(lock) 344b8b572e1SStephen Rothwell 345b8b572e1SStephen Rothwell #endif /* __KERNEL__ */ 346b8b572e1SStephen Rothwell #endif /* __ASM_SPINLOCK_H */ 347