/openbmc/linux/tools/memory-model/litmus-tests/

  Z6.0+pooncelock+poonceLock+pombonce.litmus
      6: * This litmus test demonstrates how smp_mb__after_spinlock() may be
     27: smp_mb__after_spinlock();

  MP+polockmbonce+poacquiresilsil.litmus
      6: * Do spinlocks combined with smp_mb__after_spinlock() provide order
     18: smp_mb__after_spinlock();

  README
     74: Protect the access with a lock and an smp_mb__after_spinlock()
    153: As above, but with smp_mb__after_spinlock() immediately
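The pattern these litmus tests probe: spin_lock() is only an ACQUIRE operation,
so a store preceding the lock acquisition is not ordered against loads inside
the critical section until smp_mb__after_spinlock() upgrades the acquisition to
a full barrier. A minimal sketch of that shape in kernel-style C (x, y, and
mylock are illustrative names, not taken from the tests above):

	WRITE_ONCE(x, 1);		/* store before the lock */
	spin_lock(&mylock);		/* ACQUIRE: orders later accesses only */
	smp_mb__after_spinlock();	/* full barrier: the store to x is now
					 * ordered before everything below */
	r0 = READ_ONCE(y);
	spin_unlock(&mylock);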
/openbmc/linux/arch/xtensa/include/asm/

  spinlock.h
     18: #define smp_mb__after_spinlock() smp_mb()

/openbmc/linux/arch/csky/include/asm/

  spinlock.h
     10: #define smp_mb__after_spinlock() smp_mb()
/openbmc/linux/kernel/kcsan/

  selftest.c
    148: KCSAN_CHECK_READ_BARRIER(smp_mb__after_spinlock());  in test_barrier()
    177: KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_spinlock());  in test_barrier()
    209: KCSAN_CHECK_RW_BARRIER(smp_mb__after_spinlock());  in test_barrier()

  kcsan_test.c
    578: KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);  in test_barrier_nothreads()
    623: KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);  in test_barrier_nothreads()
    668: KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);  in test_barrier_nothreads()
/openbmc/linux/arch/arm64/include/asm/

  spinlock.h
     12: #define smp_mb__after_spinlock() smp_mb()

/openbmc/linux/arch/powerpc/include/asm/

  spinlock.h
     14: #define smp_mb__after_spinlock() smp_mb()

/openbmc/u-boot/arch/riscv/include/asm/

  barrier.h
     63: #define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)

/openbmc/linux/arch/riscv/include/asm/

  barrier.h
     72: #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
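Each weakly ordered architecture above overrides the macro because its
spin_lock() provides only ACQUIRE semantics; the generic fallback appears later
in this listing under include/linux/spinlock.h. On RISC-V the override expands
to a hardware fence instruction; a sketch of that expansion, quoted from memory
and abridged from the same barrier.h:

	/* RISCV_FENCE stringizes its arguments into a "fence" instruction. */
	#define RISCV_FENCE(p, s) \
		__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

	#define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
	/* ...compiles to "fence iorw,iorw": a full barrier covering both
	 * memory and I/O accesses. */

Note the difference between the two trees: the u-boot copy fences only rw,rw,
while the Linux copy fences iorw,iorw so that I/O accesses are ordered as well.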
/openbmc/linux/tools/memory-model/Documentation/

  locking.txt
    185: of smp_mb__after_spinlock():
    199: smp_mb__after_spinlock();
    212: This addition of smp_mb__after_spinlock() strengthens the lock
    214: In other words, the addition of the smp_mb__after_spinlock() prohibits

  recipes.txt
    160: of smp_mb__after_spinlock():
    174: smp_mb__after_spinlock();
    187: This addition of smp_mb__after_spinlock() strengthens the lock acquisition
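The example that locking.txt and recipes.txt build around these lines spans
three CPUs; roughly the following, with variable and register names per my
recollection of recipes.txt, so treat them as illustrative:

	void CPU0(void)
	{
		spin_lock(&mylock);
		WRITE_ONCE(x, 1);
		WRITE_ONCE(y, 1);
		spin_unlock(&mylock);
	}

	void CPU1(void)
	{
		spin_lock(&mylock);
		smp_mb__after_spinlock();	/* upgrades the acquisition */
		r1 = READ_ONCE(y);
		r2 = READ_ONCE(z);
		spin_unlock(&mylock);
	}

	void CPU2(void)
	{
		WRITE_ONCE(z, 1);
		smp_mb();
		r3 = READ_ONCE(x);
	}

Without the smp_mb__after_spinlock(), CPU1's lock acquisition orders nothing
against CPU2's unlocked accesses, and the counter-intuitive outcome
r1 == 1 && r2 == 0 && r3 == 0 is allowed; with it, that outcome is prohibited,
which is what lines 212-214 of locking.txt are stating.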
  ordering.txt
    160: o smp_mb__after_spinlock(), which provides full ordering subsequent

  explanation.txt
    2752: smp_mb__after_spinlock(). The LKMM uses fence events with special
    2764: smp_mb__after_spinlock() orders po-earlier lock acquisition
/openbmc/linux/include/linux/

  spinlock.h
    175: #ifndef smp_mb__after_spinlock
    176: #define smp_mb__after_spinlock() kcsan_mb()
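This is the generic fallback: an architecture that needs a real barrier defines
smp_mb__after_spinlock() in its asm/spinlock.h before this point, while
architectures whose lock acquisition is already fully ordered (x86, for
instance) keep only the KCSAN instrumentation, since kcsan_mb() compiles away
when KCSAN is disabled. The guard as it appears in spinlock.h, completed with
its closing #endif:

	#ifndef smp_mb__after_spinlock
	#define smp_mb__after_spinlock()	kcsan_mb()
	#endif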
/openbmc/linux/tools/memory-model/

  linux-kernel.bell
     33: 'after-spinlock (*smp_mb__after_spinlock*) ||

  linux-kernel.def
     25: smp_mb__after_spinlock() { __fence{after-spinlock}; }
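These two files are how the Linux-kernel memory model (LKMM) learns the
primitive: linux-kernel.def translates a call to smp_mb__after_spinlock() into
a fence event carrying the after-spinlock tag, and linux-kernel.bell declares
that tag among the fence types. With the herdtools7 suite installed, the litmus
tests listed earlier can be checked against this model from the
tools/memory-model directory:

	$ herd7 -conf linux-kernel.cfg litmus-tests/MP+polockmbonce+poacquiresilsil.litmus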
/openbmc/linux/kernel/

  kthread.c
    1499: smp_mb__after_spinlock();  in kthread_unuse_mm()

  exit.c
     560: smp_mb__after_spinlock();  in exit_mm()
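Both call sites serve membarrier(): a task that stops using an mm must order
its final user-space accesses before clearing current->mm, because the
expedited membarrier scan inspects ->mm to decide which CPUs to IPI. A heavily
abridged sketch of the exit_mm() shape (task_lock() takes the task's alloc_lock
spinlock; the surrounding mmap locking and bookkeeping are omitted):

	task_lock(current);
	smp_mb__after_spinlock();	/* membarrier: user-space accesses above
					 * must not leak past the ->mm update */
	current->mm = NULL;
	task_unlock(current);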
/openbmc/linux/kernel/rcu/

  tree_nocb.h
    1040: smp_mb__after_spinlock(); /* Timer expire before wakeup. */  in do_nocb_deferred_wakeup_timer()
/openbmc/linux/kernel/sched/

  core.c
    1810: smp_mb__after_spinlock();  in uclamp_sync_util_min_rt_default()
    4231: smp_mb__after_spinlock();  in try_to_wake_up()
    4457: smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */  in cpu_curr_snapshot()
    6614: smp_mb__after_spinlock();  in __schedule()
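The try_to_wake_up()/__schedule() pair is the canonical user: each side issues
a full barrier right after taking its lock, so that either the waker observes
the sleeper's ->state or the sleeper observes the store that made its wait
condition true. An abridged sketch of the two sides (field and helper names
simplified relative to the current source):

	/* waker, in try_to_wake_up(): */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	smp_mb__after_spinlock();	/* pairs with the barrier below */
	if (!(READ_ONCE(p->__state) & state))
		goto unlock;

	/* sleeper, in __schedule(): */
	rq_lock(rq, &rf);
	smp_mb__after_spinlock();	/* full barrier after taking rq->lock;
					 * pairs with try_to_wake_up() */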
/openbmc/linux/Documentation/RCU/

  whatisRCU.rst
    659: smp_mb__after_spinlock();
    685: been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
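These lines belong to whatisRCU.rst's "toy" RCU implementation built on an
rwlock: readers read-acquire rcu_gp_mutex, so a grace period can write-acquire
it only once every pre-existing reader has finished. A sketch of the updater
side as the document presents it (reconstructed from the excerpt, so details
may differ):

	void synchronize_rcu(void)
	{
		write_lock(&rcu_gp_mutex);	/* excluded until readers drain */
		smp_mb__after_spinlock();	/* full barrier: the acquisition
						 * alone is only an ACQUIRE */
		write_unlock(&rcu_gp_mutex);
	}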