/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 */
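/*
 * Illustrative sketch only (not part of this header): plain C showing the
 * idea described above, ignoring atomicity, memory ordering and the ll/sc
 * retry loops the real implementation below needs.  The names ticket_lock
 * and ticket_unlock are hypothetical.
 *
 *	struct ticket { unsigned short head, tail; };
 *
 *	static void ticket_lock(struct ticket *t)
 *	{
 *		unsigned short me = t->tail++;	// atomically take a ticket
 *		while (t->head != me)		// spin until we are served
 *			cpu_relax();
 *	}
 *
 *	static void ticket_unlock(struct ticket *t)
 *	{
 *		t->head++;			// serve the next waiter
 *	}
 *
 * arch_spinlock_t below packs both counters into one 32-bit word (the
 * ticket counter in the upper 16 bits, "serving_now" in the lower 16), so
 * a single ll/sc sequence can observe and update them together.
 */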

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
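/*
 * Illustrative sketch only (not part of this header): how the rwlock word
 * is interpreted by the routines below -- 0 means unlocked, a positive
 * value counts readers, and the sign bit (0x80000000, matching the
 * "lui %1, 0x8000" in the writer paths) marks a writer.  Plain C, ignoring
 * atomicity and memory ordering; the names are hypothetical.
 *
 *	static int rw_read_trylock(int *lock)
 *	{
 *		if (*lock < 0)			// a writer holds the lock
 *			return 0;
 *		(*lock)++;			// count one more reader
 *		return 1;
 *	}
 *
 *	static int rw_write_trylock(int *lock)
 *	{
 *		if (*lock != 0)			// readers or writer present
 *			return 0;
 *		*lock = (int)0x80000000;	// claim exclusive ownership
 *		return 1;
 *	}
 *
 * This is also why arch_read_can_lock() below tests ">= 0" and
 * arch_write_can_lock() tests for zero.
 */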

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	addiu	%1, -1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	addiu	%1, -1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */