/openbmc/linux/tools/memory-model/litmus-tests/

  MP+polockonce+poacquiresilsil.litmus
      6:  * Do spinlocks provide order to outside observers using spin_is_locked()
      8:  * first spin_is_locked() returns false and the second true, we know that
     29: r2 = spin_is_locked(lo);
     30: r3 = spin_is_locked(lo);

  MP+polockmbonce+poacquiresilsil.litmus
      7:  * to outside observers using spin_is_locked() to sense the lock-held
      8:  * state, ordered by acquire? Note that when the first spin_is_locked()
     30: r2 = spin_is_locked(lo);
     31: r3 = spin_is_locked(lo);

  README
     76: spin_is_locked() calls in the other process.
     80: acquire load followed by a pair of spin_is_locked() calls

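The two MP+polock*+poacquiresilsil tests and the README entry describe the same experiment: can an outside observer use an acquire load followed by a pair of spin_is_locked() calls to deduce ordering against another CPU's critical section? A C-style sketch of that shape follows; it is not the actual litmus source, the lock name lo comes from the snippets above, and x, P0, and P1 are hypothetical.

#include <linux/atomic.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lo);
static int x;

/* Writer: updates x inside the critical section. */
static void P0(void)
{
        spin_lock(&lo);
        WRITE_ONCE(x, 1);
        spin_unlock(&lo);
}

/* Outside observer: acquire load of x, then two samples of the lock state. */
static void P1(int *r1, int *r2, int *r3)
{
        *r1 = smp_load_acquire(&x);
        *r2 = spin_is_locked(&lo);      /* first sample */
        *r3 = spin_is_locked(&lo);      /* second sample */
        /*
         * The litmus tests ask whether r1 == 1 together with r2 == 0 and
         * r3 == 1 is possible: seeing the lock free and then held means
         * the acquire load ran before P0 took the lock, loosely speaking.
         */
}
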
/openbmc/linux/include/linux/

  spinlock_rt.h
    150: static inline int spin_is_locked(spinlock_t *lock)   (in spin_is_locked())
    155: #define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))

  spinlock.h
    442: static __always_inline int spin_is_locked(spinlock_t *lock)   (in spin_is_locked())

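spinlock_rt.h:155 shows the main consumer of these definitions, assert_spin_locked(), which turns spin_is_locked() into a locking-contract check. A minimal sketch of that idiom with a hypothetical lock and helper:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);       /* hypothetical */
static int dev_state;

/* Contract: the caller must already hold dev_lock. */
static void dev_set_state_locked(int new_state)
{
        assert_spin_locked(&dev_lock);  /* BUG_ON(!spin_is_locked(&dev_lock)) */
        dev_state = new_state;
}

static void dev_set_state(int new_state)
{
        spin_lock(&dev_lock);
        dev_set_state_locked(new_state);
        spin_unlock(&dev_lock);
}

Note that checkpatch (see the last entry in this listing) prefers lockdep_assert_held() over assertions built directly on spin_is_locked().
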
/openbmc/linux/tools/memory-model/scripts/

  simpletest.sh
     25: exclude="${exclude}spin_lock(\|spin_unlock(\|spin_trylock(\|spin_is_locked("

/openbmc/linux/tools/memory-model/

  lock.cat
     22:  * RL   Read-Locked: a spin_is_locked() event which returns True
     23:  * RU   Read-Unlocked: a spin_is_locked() event which returns False
    100:  * RU, i.e., spin_is_locked() returning False, is slightly different.
    101:  * We rely on the memory model to rule out cases where spin_is_locked()

  linux-kernel.def
     44: spin_is_locked(X) __islocked(X)

/openbmc/linux/drivers/s390/char/

  sclp_con.c
    234: if (spin_is_locked(&sclp_con_lock))   (in sclp_console_notify())

  sclp_vt220.c
    795: if (spin_is_locked(&sclp_vt220_lock))   (in sclp_vt220_notify())

  raw3270.c
    884: if (spin_is_locked(get_ccwdev_lock(rp->cdev)))   (in raw3270_view_lock_unavailable())

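All three s390 hits follow the same panic-path pattern: before touching a console or device lock from a panic notifier, check whether the lock is already held (possibly by the CPU that was interrupted) and back off instead of deadlocking. A hedged sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(con_lock);       /* hypothetical console lock */

/* Panic path only: never spin on con_lock, since the owner may never run again. */
static int con_panic_flush(void)
{
        if (spin_is_locked(&con_lock))
                return -EBUSY;          /* already held somewhere; give up */
        if (!spin_trylock(&con_lock))
                return -EBUSY;          /* lost the race; give up */

        /* ... flush pending console output ... */
        spin_unlock(&con_lock);
        return 0;
}
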
/openbmc/linux/arch/powerpc/include/asm/

  kvm_book3s_64.h
    654: VM_WARN(!spin_is_locked(&kvm->mmu_lock),   (in find_kvm_secondary_pte())
    666: VM_WARN(!spin_is_locked(&kvm->mmu_lock),   (in find_kvm_host_pte())

/openbmc/linux/drivers/net/ethernet/sfc/

  tc_counters.c
     43: EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));   (in efx_tc_counter_free())
    193: EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));   (in efx_tc_flower_release_counter())

/openbmc/linux/fs/

  userfaultfd.c
    217: VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));   (in userfaultfd_ctx_put())
    219: VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));   (in userfaultfd_ctx_put())
    221: VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));   (in userfaultfd_ctx_put())
    223: VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));   (in userfaultfd_ctx_put())

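The sfc and userfaultfd hits use spin_is_locked() in the opposite direction: as a teardown sanity check that nobody can still be inside the lock when the containing object is about to be freed. A sketch of that idiom using plain WARN_ON() and a hypothetical structure:

#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct counter {                        /* hypothetical refcounted object */
        spinlock_t lock;
        u64 value;
};

static void counter_free(struct counter *cnt)
{
        /* If this fires, some path can still take cnt->lock after the
         * final reference is dropped, i.e. a use-after-free is possible.
         */
        WARN_ON(spin_is_locked(&cnt->lock));
        kfree(cnt);
}
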
/openbmc/linux/drivers/hv/

  ring_buffer.c
    270: return spin_is_locked(&rinfo->ring_lock);   (in hv_ringbuffer_spinlock_busy())

/openbmc/linux/drivers/firmware/google/

  gsmi.c
    691: if (spin_is_locked(&gsmi_dev.lock))   (in gsmi_panic_callback())

/openbmc/linux/include/net/

  sch_generic.h
    168: return spin_is_locked(&qdisc->seqlock);   (in qdisc_is_running())

  sock.h
   1850: !spin_is_locked(&sk->sk_lock.slock);   (in sock_allow_reclassification())

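hv_ringbuffer_spinlock_busy(), qdisc_is_running(), and sock_allow_reclassification() all return the raw spin_is_locked() state to their callers as a hint rather than as a synchronization result. A small sketch of that best-effort query shape, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct ring {                           /* hypothetical device ring */
        spinlock_t lock;
};

/* Best-effort: the answer may be stale by the time the caller looks at it,
 * so it can only steer heuristics (e.g. skip optional work), never prove
 * that the ring is safe to touch without the lock.
 */
static bool ring_busy(struct ring *r)
{
        return spin_is_locked(&r->lock);
}
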
/openbmc/linux/arch/parisc/kernel/

  firmware.c
    211: if (spin_is_locked(&pdc_lock))   (in pdc_emergency_unlock())

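pdc_emergency_unlock() is the crash-path variant: if the machine dies while the firmware lock is held, the lock is forced open so emergency output can still reach the firmware. A sketch of that shape, not the actual parisc implementation:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(fw_lock);        /* hypothetical firmware-call lock */

/* Crash path only: forcibly release the lock if it was held when the
 * machine died.  Unsafe in any normal context.
 */
static void fw_emergency_unlock(void)
{
        if (spin_is_locked(&fw_lock))
                spin_unlock(&fw_lock);
}
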
/openbmc/linux/fs/ocfs2/

  inode.c
   1176: mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),   (in ocfs2_clear_inode())

  dlmglue.c
    779: mlog_bug_on_msg(spin_is_locked(&res->l_lock),   (in ocfs2_lock_res_free())

/openbmc/linux/arch/powerpc/kvm/

  book3s_hv_nested.c
    871: VM_WARN(!spin_is_locked(&kvm->mmu_lock),   (in find_kvm_nested_guest_pte())

/openbmc/linux/ipc/

  sem.c
   1874: spin_is_locked(&ulp->lock)) {   (in __lookup_undo())

/openbmc/linux/Documentation/scsi/

  ChangeLog.megaraid
    249: BUG_ON(!spin_is_locked(&some_lock));

/openbmc/linux/Documentation/dev-tools/

  checkpatch.rst
    462: assertions based on spin_is_locked()

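checkpatch flags assertions built on spin_is_locked(), such as the BUG_ON(!spin_is_locked(&some_lock)) example from ChangeLog.megaraid above, because on uniprocessor builds without spinlock debugging spin_is_locked() can simply return 0, making such assertions fire even when the lock is held. The usual replacement is lockdep_assert_held(); a minimal sketch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(some_lock);      /* name echoes the ChangeLog example */
static int shared;

static void update_shared_locked(int v)
{
        /* No-op unless lockdep is enabled, and correct on both SMP and UP,
         * unlike BUG_ON(!spin_is_locked(&some_lock)).
         */
        lockdep_assert_held(&some_lock);
        shared = v;
}
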