/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)
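/*
 * Illustrative usage sketch for the NOTE above (not part of the original
 * header): a writer must disable interrupts because an in-irq reader on
 * the same CPU would otherwise deadlock against the local writer, while
 * readers may take the lock without disabling interrupts.  The helper
 * names (stats_lock, copy_stats, update_stats) are hypothetical.
 *
 *	static DEFINE_RWLOCK(stats_lock);
 *	unsigned long flags;
 *
 *	// reader, may run in interrupt context: plain read-lock is enough
 *	read_lock(&stats_lock);
 *	copy_stats();
 *	read_unlock(&stats_lock);
 *
 *	// writer, process context only: must take the irq-safe write-lock
 *	write_lock_irqsave(&stats_lock, flags);
 *	update_stats();
 *	write_unlock_irqrestore(&stats_lock, flags);
 */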
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
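/*
 * Illustrative note (not from the original source): on z196 and newer
 * machines the lock/unlock paths above use the interlocked-access
 * instructions lao/lan/laa via __RAW_LOCK()/__RAW_UNLOCK(), with a
 * serializing "bcr 14,0" emitted only on the acquire side; older
 * machines fall back to the __atomic_cmpxchg_bool() loops.  A C-level
 * sketch of what one __RAW_LOCK(ptr, op_val, op) invocation performs
 * atomically:
 *
 *	old_val = *ptr;
 *	*ptr = old_val <op> op_val;	// <op> is OR, AND or ADD
 *	return old_val;
 */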
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */