/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
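
/*
 * Illustrative sketch only, not a definition of the real helper (the
 * out-of-line implementations live in arch/powerpc/lib/locks.c): given
 * the token layout above, splpar_spin_yield() can recover the holder
 * from the lock word and, when that virtual processor is preempted
 * (odd yield_count, as in vcpu_is_preempted() above), confer the rest
 * of our timeslice to it via the H_CONFER hypercall, roughly:
 *
 *	u32 val = lock->slock;
 *	if (val) {
 *		unsigned int holder_cpu = val & 0xffff;
 *		u32 yc = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *		if (yc & 1)
 *			... H_CONFER to holder_cpu with yield count yc ...
 *	}
 */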

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
#endif

static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_SPLPAR
	return static_branch_unlikely(&shared_processor);
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
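
/*
 * Summary of the lock-word encoding used below (for illustration):
 *
 *	0		unlocked
 *	n > 0		held by n readers
 *	< 0		held by a writer (WRLOCK_TOKEN: 0x8000xxxx on
 *			64-bit, sign-extended so it is negative, or -1
 *			on 32-bit)
 *
 * Hence __arch_read_trylock() succeeds when incrementing a non-negative
 * word gives a result > 0, and __arch_write_trylock() succeeds only
 * when it finds the word equal to 0.
 */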

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
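
/*
 * Usage note (illustrative sketch, not part of the arch API proper):
 * kernel code does not call these arch_* primitives directly; it uses
 * the generic interfaces from <linux/spinlock.h>, which reach this
 * header through the raw_spinlock layer, e.g.:
 *
 *	static DEFINE_SPINLOCK(my_lock);	(hypothetical lock)
 *
 *	spin_lock(&my_lock);		eventually arch_spin_lock()
 *	... critical section ...
 *	spin_unlock(&my_lock);		eventually arch_spin_unlock()
 */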