/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#endif
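
/*
 * Hedged sketch of what the pseries yield helper does; the real
 * __spin_yield() lives in arch/powerpc/lib/locks.c, and this is only an
 * illustration, not its definition.  Roughly, it recovers the holder CPU
 * from the lock token, checks the holder's lppaca yield_count (an odd
 * value means that vCPU is currently preempted), and if so confers the
 * rest of our timeslice to it via the H_CONFER hcall:
 *
 *	holder_cpu  = lock->slock & 0xffff;
 *	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *	if (yield_count & 1)		// odd => holder not running
 *		plpar_hcall_norets(H_CONFER,
 *			get_hard_smp_processor_id(holder_cpu), yield_count);
 */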

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries so guard anything LPPACA related to
 * allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
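
/*
 * Worked example of the rwlock word encoding, derived from the
 * primitives below rather than from any separate specification:
 *
 *	rw->lock == 0		lock is free
 *	rw->lock == n > 0	held by n readers
 *	rw->lock < 0		held by a writer (WRLOCK_TOKEN is
 *				negative: -1, or 0x800000yy on PPC64)
 *
 * So a read lock atomically increments the word and backs off if the
 * result is <= 0, while a write lock may only store its token over 0.
 */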

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */