/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 14) ^ counters) & 0x1fff;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	while (__raw_spin_is_locked(x)) { cpu_relax(); }

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 14) - counters) & 0x1fff) > 1;
}
#define __raw_spin_is_contended	__raw_spin_is_contended
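
/*
 * Layout sketch (inferred from the shifts and masks above; an
 * illustration, not an authoritative description): both counters share
 * one 32-bit word,
 *
 *	bits 26..14	ticket ("tail"), bumped by 0x4000 in the lock paths
 *	bit  13		spare; cleared on unlock so a now-serving carry
 *			can never spill into the ticket field
 *	bits 12..0	now-serving ("head"), bumped by 1 on unlock
 *
 * Worked example with hypothetical values: counters == (3 << 14) | 2,
 * i.e. tail == 3 and head == 2.  The lock is held, since
 * ((counters >> 14) ^ counters) & 0x1fff == 3 ^ 2 == 1, but it is not
 * contended, since ((counters >> 14) - counters) & 0x1fff == 1 and the
 * test above requires a queue depth greater than 1.
 */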

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int my_ticket;
	int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 3f			\n"
		"	nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"							\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	}

	smp_llsc_mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"				# __raw_spin_unlock	\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[ticket], %[ticket], 1		\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_unlock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[ticket], %[ticket], 1		\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 2f				\n"
		"	nop						\n"
		"							\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	}
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 4f				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"4:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
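
/*
 * A minimal caller-side sketch of the "mixed" pattern just described,
 * using the generic rwlock wrappers rather than the raw helpers below
 * (the lock name and both function bodies are hypothetical):
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	void writer_path(void)		(writers must be irq-safe)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&example_lock, flags);
 *		... update the protected data ...
 *		write_unlock_irqrestore(&example_lock, flags);
 *	}
 *
 *	void reader_path(void)		(may run in interrupt context)
 *	{
 *		read_lock(&example_lock);
 *		... read the protected data ...
 *		read_unlock(&example_lock);
 *	}
 */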

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	addu	%1, 1					\n"
		"	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
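
/*
 * Encoding sketch (inferred from the assembly above; an illustration,
 * not an authoritative description): the rwlock word is a reader count
 * whose bit 31 doubles as the write flag.  Readers "addu" the count by
 * one and take the slow path ("bltz") while bit 31 is set; a writer
 * waits for the whole word to reach zero and then stores 0x80000000
 * ("lui %1, 0x8000").  With hypothetical values:
 *
 *	rw->lock == 0		unlocked; read and write both possible
 *	rw->lock == 2		held by two readers; more readers may
 *				enter, writers must wait
 *	rw->lock == 0x80000000	write-held; __raw_write_unlock() releases
 *				it by simply storing zero ("sw $0")
 */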
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */
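
/*
 * A rough caller-side sketch of the trylock convention implemented above,
 * via the generic wrapper (a nonzero return means the lock was taken; the
 * lock name is hypothetical):
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	if (spin_trylock(&example_lock)) {
 *		... we hold the ticket that is now serving ...
 *		spin_unlock(&example_lock);
 *	} else {
 *		... contended: back off or take a slow path ...
 *	}
 */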