/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee a STORE before the spin_lock() is ordered against a
 *     LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0                 CPU1                    CPU2
 *
 *                      for (;;) {
 *                        if (READ_ONCE(X))
 *                          break;
 *                      }
 * X=1
 *                      <sched-out>
 *                                              <sched-in>
 *                                              r = X;
 *
 * Without transitivity it could be that CPU1 observes X!=0 and breaks the
 * loop, we then get migrated, and CPU2 still sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif
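
/*
 * For illustration only: a sketch of the store-buffering case that
 * smp_mb__after_spinlock() is meant to close.  The names X, Y and 'lock'
 * below are hypothetical, not anything defined in this header.
 *
 *      CPU0                            CPU1
 *
 *      WRITE_ONCE(X, 1);               WRITE_ONCE(Y, 1);
 *      spin_lock(&lock);               smp_mb();
 *      smp_mb__after_spinlock();       r1 = READ_ONCE(X);
 *      r0 = READ_ONCE(Y);
 *
 * With the full barrier after taking the lock, the outcome r0 == 0 &&
 * r1 == 0 is forbidden.  The lock's ACQUIRE ordering alone would not rule
 * it out, since ACQUIRE does not order CPU0's earlier STORE to X against
 * its later LOAD of Y.
 */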

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
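
/*
 * For illustration only (the lock and function below are hypothetical):
 * the _irqsave variants expect 'flags' to be an unsigned long lvalue,
 * which the typecheck() in the macros above enforces, and the trylock
 * forms undo the interrupt disabling themselves when they fail.
 *
 *      static DEFINE_RAW_SPINLOCK(example_lock);
 *
 *      void example(void)
 *      {
 *              unsigned long flags;
 *
 *              if (raw_spin_trylock_irqsave(&example_lock, flags)) {
 *                      // critical section, interrupts disabled
 *                      raw_spin_unlock_irqrestore(&example_lock, flags);
 *              }
 *              // on failure the macro already did local_irq_restore()
 *      }
 */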

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})
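
/*
 * For illustration only (the lock, counter and function below are
 * hypothetical): the typical pattern with the spin_lock() API defined
 * above.  Data that is also touched from interrupt context normally uses
 * the _irqsave/_irqrestore variants.
 *
 *      static DEFINE_SPINLOCK(example_lock);
 *      static int example_count;
 *
 *      void example_update(void)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&example_lock, flags);
 *              example_count++;
 *              spin_unlock_irqrestore(&example_lock, flags);
 *      }
 */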

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
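
/*
 * For illustration only (obj, its fields and obj_list_lock are
 * hypothetical): the classic "drop the last reference under a lock"
 * pattern that atomic_dec_and_lock() supports.  The lock is taken only
 * when the counter actually reaches zero.
 *
 *      if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *              list_del(&obj->node);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */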

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                           size_t max_size, unsigned int cpu_mult,
                           gfp_t gfp);

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */