/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

/* SEV is patched to a NOP on uniprocessor kernels (no waiters to wake). */
#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

/*
 * Make prior stores visible to other CPUs, then execute SEV to wake any
 * CPUs parked in WFE waiting for this lock word to change.
 */
static inline void dsb_sev(void)
{

	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

/*
 * Take the lock: atomically take a ticket (bump the "next" halfword via
 * ldrex/strex), then spin — sleeping in WFE between polls — until the
 * "owner" halfword catches up with the ticket we were issued.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	/* lockval holds the pre-increment value: tickets.next is ours. */
	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();	/* woken by dsb_sev() in the unlock path */
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	}

	smp_mb();	/* acquire barrier: order critical section after lock */
}

/*
 * Try to take the lock without spinning.  The "subs ... ror #16" compares
 * the two halfwords of the lock word against each other: zero means
 * next == owner, i.e. the lock is free, so take a ticket.  Returns 1 on
 * success, 0 if the lock was held.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);	/* retry only if strex lost the exclusive monitor */

	if (!contended) {
		smp_mb();	/* acquire barrier on success */
		return 1;
	} else {
		return 0;
	}
}

/*
 * Release the lock by advancing "owner" to admit the next ticket holder,
 * then wake any waiters sleeping in WFE.  A plain increment suffices:
 * only the lock holder writes the owner field.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();	/* release barrier: order critical section before unlock */
	lock->tickets.owner++;
	dsb_sev();
}

/* Lock is free when no tickets are outstanding (owner has caught up). */
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/* Contended means more than one ticket outstanding (holder + waiter(s)). */
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

/*
 * Acquire the write lock: wait (in WFE) until the lock word is zero —
 * no readers and no writer — then claim it by storing bit 31.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();	/* acquire barrier */
}

/*
 * One-shot write-lock attempt: claim bit 31 only if the lock word is
 * currently zero.  Returns 1 on success, 0 if readers or a writer hold it.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);	/* retry only on lost exclusive monitor */

	if (!contended) {
		smp_mb();	/* acquire barrier on success */
		return 1;
	} else {
		return 0;
	}
}

/*
 * Release the write lock.  The writer holds the lock exclusively, so a
 * plain store of zero is sufficient — no ldrex/strex loop needed.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();	/* release barrier */

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();	/* wake CPUs waiting in WFE for the lock */
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */

/*
 * Acquire a read lock: increment the reader count, which only succeeds
 * while the result stays non-negative (bit 31 — the write lock — clear).
 * If a writer holds the lock, sleep in WFE and retry.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();	/* acquire barrier */
}

/*
 * Drop a read lock: decrement the reader count with an ldrex/strex loop
 * (other readers may race).  Only the last reader out (count reaches
 * zero) issues SEV, to wake a waiting writer.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();	/* release barrier */

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

/*
 * One-shot read-lock attempt: increment the reader count unless a writer
 * holds the lock (bit 31 set).  Returns 1 on success, 0 otherwise.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);	/* retry only on lost exclusive monitor */

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();	/* acquire barrier on success */
		return 1;
	} else {
		return 0;
	}
}

#endif /* __ASM_SPINLOCK_H */