/openbmc/linux/kernel/locking/

spinlock_debug.c
    16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    21       * Make sure we are not reinitializing a held lock:
    23      debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    24      lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
    26      lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    27      lock->magic = SPINLOCK_MAGIC;
    28      lock->owner = SPINLOCK_OWNER_INIT;
    29      lock->owner_cpu = -1;
    35  void __rwlock_init(rwlock_t *lock, const char *name,
    40       * Make sure we are not reinitializing a held lock:
    [all …]
rtmutex.c
    1   // SPDX-License-Identifier: GPL-2.0-only
    3    * RT-Mutexes: simple blocking mutual exclusion locks with PI support
    7    * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
    8    * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
    17   * See Documentation/locking/rt-mutex-design.rst for details.
    27  #include <trace/events/lock.h>
    36          struct rt_mutex *lock,
    42  static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
    47  static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
    52  static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
    [all …]
rtmutex_api.c
    1   // SPDX-License-Identifier: GPL-2.0-only
    17   * Debug aware fast / slowpath lock,trylock,unlock
    22  static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
    30      mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
    31      ret = __rt_mutex_lock(&lock->rtmutex, state);
    33          mutex_release(&lock->dep_map, _RET_IP_);
    45   * rt_mutex_lock_nested - lock a rt_mutex
    47   * @lock: the rt_mutex to be locked
    50  void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
    52      __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
    [all …]
mutex.c
    1   // SPDX-License-Identifier: GPL-2.0-only
    14   * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
    15   *   from the -rt tree, where it was originally implemented for rtmutexes
    19   * Also see Documentation/locking/mutex-design.rst.
    34  #include <trace/events/lock.h>
    46  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    48      atomic_long_set(&lock->owner, 0);
    49      raw_spin_lock_init(&lock->wait_lock);
    50      INIT_LIST_HEAD(&lock->wait_list);
    52      osq_lock_init(&lock->osq);
    [all …]
ww_mutex.h
    1   /* SPDX-License-Identifier: GPL-2.0-only */
    9   __ww_waiter_first(struct mutex *lock)
    13      w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
    14      if (list_entry_is_head(w, &lock->wait_list, list))
    21  __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
    24      if (list_entry_is_head(w, &lock->wait_list, list))
    31  __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
    34      if (list_entry_is_head(w, &lock->wait_list, list))
    41  __ww_waiter_last(struct mutex *lock)
    45      w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
    [all …]
spinlock.c
    1   // SPDX-License-Identifier: GPL-2.0
    10   * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
    33   * If lockdep is enabled then we use the non-preemption spin-ops
    35   * not re-enabled during lock-acquire (which the preempt-spin-ops do):
    46   * Some architectures can relax in favour of the CPU owning the lock.
    63   * This could be a long-held lock. We both prepare to spin for a long
    65   * towards that other CPU that it should break the lock ASAP.
    68  void __lockfunc __raw_##op##_lock(locktype##_t *lock)          \
    72          if (likely(do_raw_##op##_trylock(lock)))               \
    76          arch_##op##_relax(&lock->raw_lock);                    \
    [all …]
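The macro body quoted above expands to the classic try-then-relax acquire
loop. A standalone sketch of that shape, using C11 atomics in place of the
kernel's arch hooks (the toy_* names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_spinlock { atomic_bool locked; };

    static bool toy_trylock(struct toy_spinlock *l)
    {
        /* Succeed only if we flip false -> true; acquire pairs with
         * the release in toy_unlock(). */
        bool expected = false;
        return atomic_compare_exchange_strong_explicit(&l->locked, &expected,
                                                       true,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    static void toy_lock(struct toy_spinlock *l)
    {
        while (!toy_trylock(l)) {
            /* The relax step: wait on plain loads so failed
             * read-modify-writes do not keep stealing the owner's
             * cache line. */
            while (atomic_load_explicit(&l->locked, memory_order_relaxed))
                ;
        }
    }

    static void toy_unlock(struct toy_spinlock *l)
    {
        atomic_store_explicit(&l->locked, false, memory_order_release);
    }

The read-only inner spin eases cache-line pressure on the owner; the
kernel's arch_*_relax() hook serves the same purpose per architecture.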
qrwlock.c
    1   // SPDX-License-Identifier: GPL-2.0-or-later
    5    * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
    15  #include <trace/events/lock.h>
    18   * queued_read_lock_slowpath - acquire read lock of a queued rwlock
    19   * @lock: Pointer to queued rwlock structure
    21  void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
    24       * Readers come here when they cannot get the lock without waiting
    28       * Readers in interrupt context will get the lock immediately
    29       * if the writer is just waiting (not holding the lock yet),
    30       * so spin with ACQUIRE semantics until the lock is available
    [all …]
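The comment spells out qrwlock's fairness exception: a reader running in
interrupt context may overtake a writer that is queued but not yet
holding the lock. A minimal sketch of that path, assuming the
split-counter layout from asm-generic/qrwlock.h (low byte = writer
state, upper bits = reader count); the toy_* name and the reduction to
C11 atomics are mine:

    #include <stdatomic.h>

    #define QW_LOCKED 0x0ffu     /* low byte set: a writer holds the lock */
    #define QR_BIAS   (1u << 9)  /* one reader reference */

    static void toy_read_lock_in_irq(atomic_uint *cnts)
    {
        /* Take a reader reference unconditionally; a merely waiting
         * writer must not block readers in interrupt context. */
        atomic_fetch_add_explicit(cnts, QR_BIAS, memory_order_relaxed);

        /* Spin with ACQUIRE semantics only while a writer actually
         * holds the lock. */
        while (atomic_load_explicit(cnts, memory_order_acquire) & QW_LOCKED)
            ;
    }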
/openbmc/linux/include/linux/

rwlock_api_smp.h
    18  void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
    19  void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
    20  void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    21  void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
    22  void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
    23  void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
    24  void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
    25  unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
    26          __acquires(lock);
    27  unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
spinlock.h
    1   /* SPDX-License-Identifier: GPL-2.0 */
    7    * include/linux/spinlock.h - generic spinlock/rwlock declarations
    24   * (also included on UP-debug builds:)
    35   * (which is an empty structure on non-debug builds)
    44   * builds. (which are NOPs on non-debug, non-preempt
    47   * (included on UP-non-debug builds:)
    72  #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
    92   * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
    101 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    104 # define raw_spin_lock_init(lock)                   \
    [all …]
spinlock_api_smp.h
    22  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
    23  void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
    24          __acquires(lock);
    26  _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
    27          __acquires(lock);
    28  void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    29  void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
    30          __acquires(lock);
    32  unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
    33          __acquires(lock);
    [all …]
spinlock_rt.h
    1   // SPDX-License-Identifier: GPL-2.0-only
    10  extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
    13  static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
    23      rt_mutex_base_init(&(slock)->lock);             \
    31      rt_mutex_base_init(&(slock)->lock);             \
    35  extern void rt_spin_lock(spinlock_t *lock);
    36  extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
    37  extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
    38  extern void rt_spin_unlock(spinlock_t *lock);
    39  extern void rt_spin_lock_unlock(spinlock_t *lock);
    [all …]
local_lock.h
    1   /* SPDX-License-Identifier: GPL-2.0 */
    8    * local_lock_init - Runtime initialize a lock instance
    10  #define local_lock_init(lock)       __local_lock_init(lock)
    13   * local_lock - Acquire a per CPU local lock
    14   * @lock: The lock variable
    16  #define local_lock(lock)            __local_lock(lock)
    19   * local_lock_irq - Acquire a per CPU local lock and disable interrupts
    20   * @lock: The lock variable
    22  #define local_lock_irq(lock)        __local_lock_irq(lock)
    25   * local_lock_irqsave - Acquire a per CPU local lock, save and disable
    [all …]
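Putting the documented macros together, a minimal usage sketch in the
style of the kernel's usual per-CPU pattern; struct toy_stats and its
fields are invented for illustration:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU statistics block: the local_lock_t protects
     * the other fields, on the local CPU only, against preemption (or
     * against interrupts with the _irq/_irqsave variants). */
    struct toy_stats {
        local_lock_t lock;
        unsigned long events;
    };

    static DEFINE_PER_CPU(struct toy_stats, toy_stats) = {
        .lock = INIT_LOCAL_LOCK(lock),
    };

    static void toy_count_event(void)
    {
        local_lock(&toy_stats.lock);   /* disables preemption on !PREEMPT_RT */
        this_cpu_inc(toy_stats.events);
        local_unlock(&toy_stats.lock);
    }

On PREEMPT_RT the same code takes a per-CPU sleeping lock instead, which
is why annotating the data with a local_lock_t beats a bare
preempt_disable().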
rwlock.h
    18  extern void __rwlock_init(rwlock_t *lock, const char *name,
    20  # define rwlock_init(lock)                          \
    24      __rwlock_init((lock), #lock, &__key);           \
    27  # define rwlock_init(lock)                          \
    28      do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    32  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    33  extern int do_raw_read_trylock(rwlock_t *lock);
    34  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    35  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    36  extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]
/openbmc/linux/drivers/gpu/drm/

drm_lock.c
    50   * Take the heavyweight lock.
    52   * \param lock lock pointer.
    54   * \return one if the lock is held, or zero otherwise.
    56   * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
    63      volatile unsigned int *lock = &lock_data->hw_lock->lock;
    65      spin_lock_bh(&lock_data->spinlock);
    67      old = *lock;
    72          ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
    75      prev = cmpxchg(lock, old, new);
    77          spin_unlock_bh(&lock_data->spinlock);
    [all …]
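drm_lock_take()'s core is an optimistic compare-and-swap retry loop:
reread the lock word, compute the desired value, and cmpxchg it in,
looping while another context races us. A standalone sketch of that
loop with C11 atomics; the TOY_LOCK_HELD layout stands in for DRM's
real context/flag encoding:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TOY_LOCK_HELD 0x80000000u

    static bool toy_lock_take(atomic_uint *lock, unsigned int context)
    {
        unsigned int old, new;

        do {
            old = atomic_load_explicit(lock, memory_order_relaxed);
            if (old & TOY_LOCK_HELD)
                return false;               /* already held elsewhere */
            new = context | TOY_LOCK_HELD;  /* claim it for this context */
            /* On failure 'old' is refreshed and the check reruns. */
        } while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
        return true;
    }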
/openbmc/linux/fs/ocfs2/dlm/

dlmast.c
    1   // SPDX-License-Identifier: GPL-2.0-or-later
    35                            struct dlm_lock *lock);
    36  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
    39   * lock level will obsolete a pending bast.
    40   * For example, if dlm_thread queued a bast for an EX lock that
    42   * lock owner downconverted to NL, the bast is now obsolete.
    44   * This is needed because the lock and convert paths can queue
    45   * asts out-of-band (not waiting for dlm_thread) in order to
    47  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    49      assert_spin_locked(&dlm->ast_lock);
    [all …]
dlmlock.c
    1   // SPDX-License-Identifier: GPL-2.0-or-later
    5    * underlying calls for lock creation
    45                             struct dlm_lock *lock, int flags);
    49  static void dlm_lock_detach_lockres(struct dlm_lock *lock);
    57          return -ENOMEM;
    66  /* Tell us whether we can grant a new lock request.
    68   * caller needs:  res->spinlock
    71   * returns: 1 if the lock can be granted, 0 otherwise.
    74                                    struct dlm_lock *lock)
    78      list_for_each_entry(tmplock, &res->granted, list) {
    [all …]
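dlm_can_grant_new_lock() walks the granted (and, in the full code,
converting) queues and refuses the request at the first incompatible
holder. A self-contained sketch of that check under the classic VMS/DLM
mode-compatibility matrix; the toy linked list replaces the kernel's
list_for_each_entry():

    #include <stdbool.h>
    #include <stddef.h>

    enum dlm_mode { NL, CR, CW, PR, PW, EX };  /* null ... exclusive */

    /* compat[held][requested]: classic DLM compatibility matrix */
    static const bool compat[6][6] = {
        /*          NL CR CW PR PW EX */
        /* NL */  {  1, 1, 1, 1, 1, 1 },
        /* CR */  {  1, 1, 1, 1, 1, 0 },
        /* CW */  {  1, 1, 1, 0, 0, 0 },
        /* PR */  {  1, 1, 0, 1, 0, 0 },
        /* PW */  {  1, 1, 0, 0, 0, 0 },
        /* EX */  {  1, 0, 0, 0, 0, 0 },
    };

    struct toy_lock {
        enum dlm_mode mode;
        struct toy_lock *next;
    };

    static bool can_grant_new_lock(const struct toy_lock *granted,
                                   enum dlm_mode requested)
    {
        for (const struct toy_lock *l = granted; l; l = l->next)
            if (!compat[l->mode][requested])
                return false;
        return true;
    }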
dlmconvert.c
    1   // SPDX-License-Identifier: GPL-2.0-or-later
    5    * underlying calls for lock conversion
    37   * needs a spinlock held on entry (res->spinlock) and it is the
    38   * only one that holds a lock on exit (res->spinlock).
    43                                             struct dlm_lock *lock, int flags,
    48                                  struct dlm_lock *lock, int flags, int type);
    55   * taken:   takes and drops res->spinlock
    61                          struct dlm_lock *lock, int flags, int type)
    66      spin_lock(&res->spinlock);
    70      res->state |= DLM_LOCK_RES_IN_PROGRESS;
    [all …]
/openbmc/linux/drivers/md/persistent-data/

dm-block-manager.c
    1   // SPDX-License-Identifier: GPL-2.0-only
    7   #include "dm-block-manager.h"
    8   #include "dm-persistent-data-internal.h"
    10  #include <linux/dm-bufio.h>
    15  #include <linux/device-mapper.h>
    21  /*----------------------------------------------------------------*/
    32   * trace is also emitted for the previous lock acquisition.
    45      spinlock_t lock;
    61  static unsigned int __find_holder(struct block_lock *lock,
    67          if (lock->holders[i] == task)
    [all …]
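__find_holder() is a linear scan over a small fixed array of holder
tasks, used so the debug block-manager can report who already owns a
lock. A sketch of that helper; MAX_HOLDERS and the void-pointer task
handles are stand-ins for the real block_lock internals:

    #include <stddef.h>

    #define MAX_HOLDERS 4

    struct toy_block_lock {
        const void *holders[MAX_HOLDERS];   /* NULL marks a free slot */
    };

    /* Returns the slot holding 'task'; pass NULL to find a free slot.
     * Returns MAX_HOLDERS when nothing matches (the real code treats
     * that as a bug). */
    static unsigned int find_holder(const struct toy_block_lock *lock,
                                    const void *task)
    {
        unsigned int i;

        for (i = 0; i < MAX_HOLDERS; i++)
            if (lock->holders[i] == task)
                break;
        return i;
    }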
/openbmc/openbmc-test-automation/openpower/ext_interfaces/

test_lock_management.robot
    3   Documentation    Test lock management feature of management console on BMC.
    28  Acquire Read Write Lock
    31      [Template]    Acquire Lock On Resource
    34      HMCID-01    ReadCase1     False
    35      HMCID-01    ReadCase2     False
    36      HMCID-01    ReadCase3     False
    37      HMCID-01    WriteCase1    False
    38      HMCID-01    WriteCase2    False
    39      HMCID-01    WriteCase3    False
    42  Acquire Read Lock On Read Lock
    [all …]
/openbmc/linux/include/asm-generic/

qrwlock.h
    1   /* SPDX-License-Identifier: GPL-2.0-or-later */
    3    * Queue read/write lock
    7    * asm-generic/spinlock.h meets these requirements.
    9    * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
    20  #include <asm-generic/qrwlock_types.h>
    28  #define _QW_LOCKED  0x0ff   /* A writer holds the lock */
    36  extern void queued_read_lock_slowpath(struct qrwlock *lock);
    37  extern void queued_write_lock_slowpath(struct qrwlock *lock);
    40   * queued_read_trylock - try to acquire read lock of a queued rwlock
    41   * @lock : Pointer to queued rwlock structure
    [all …]
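queued_read_trylock()'s documented strategy is add-then-check: take a
reader reference optimistically, then roll it back if the writer byte
turned out to be busy. A standalone sketch with C11 atomics; the
constants mirror the _QW_*/_QR_BIAS split above, and the toy_* name is
mine:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define QW_WMASK 0x1ffu      /* writer waiting or holding */
    #define QR_BIAS  (1u << 9)   /* one reader reference */

    static bool toy_read_trylock(atomic_uint *cnts)
    {
        if (atomic_load_explicit(cnts, memory_order_relaxed) & QW_WMASK)
            return false;        /* writer active: fail fast */

        /* Optimistically add a reader; acquire orders the critical
         * section against the check below. */
        if (!(atomic_fetch_add_explicit(cnts, QR_BIAS, memory_order_acquire)
              & QW_WMASK))
            return true;

        /* A writer slipped in between check and add: back out. */
        atomic_fetch_sub_explicit(cnts, QR_BIAS, memory_order_relaxed);
        return false;
    }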
/openbmc/linux/Documentation/locking/

lockdep-design.rst
    8   Lock-class
    9   ----------
    15  tens of thousands of) instantiations. For example a lock in the inode
    17  lock class.
    19  The validator tracks the 'usage state' of lock-classes, and it tracks
    20  the dependencies between different lock-classes. Lock usage indicates
    21  how a lock is used with regard to its IRQ contexts, while lock
    22  dependency can be understood as lock order, where L1 -> L2 suggests that
    26  continuing effort to prove lock usages and dependencies are correct or
    29  A lock-class's behavior is constructed by its instances collectively:
    [all …]
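A short illustration of the class/instance split described above, using
the stock spin_lock_init() pattern (struct toy_inode is invented):
because the lockdep key is a static variable materialized at the init
call site, every lock initialized there maps to a single lock-class, no
matter how many instances exist.

    #include <linux/spinlock.h>

    struct toy_inode {
        spinlock_t i_lock;
        /* ... */
    };

    static void toy_inode_init(struct toy_inode *inode)
    {
        /* Under lockdep, spin_lock_init() declares a static
         * struct lock_class_key at THIS call site; thousands of
         * toy_inode instances therefore share one lock-class. */
        spin_lock_init(&inode->i_lock);
    }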
/openbmc/qemu/util/

qemu-coroutine-lock.c
    24   * The lock-free mutex implementation is based on OSv
    38      QSIMPLEQ_INIT(&queue->entries);
    41  void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
    46          QSIMPLEQ_INSERT_HEAD(&queue->entries, self, co_queue_next);
    48          QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
    51      if (lock) {
    52          qemu_lockable_unlock(lock);
    69      if (lock) {
    70          qemu_lockable_lock(lock);
    74  bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
    [all …]
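The excerpt shows the wait side dropping the caller's lockable before
yielding and re-taking it after wakeup, which makes CoQueue behave like
a condition variable. A hedged usage sketch: it assumes the
two-argument qemu_co_queue_wait() convenience wrapper (whose exact
signature has shifted across QEMU versions) and invents all toy_*
names:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    static CoMutex toy_mutex;
    static CoQueue toy_waiters;
    static int toy_items;

    static void coroutine_fn toy_consume(void)
    {
        qemu_co_mutex_lock(&toy_mutex);
        while (toy_items == 0) {
            /* Unlocks toy_mutex, parks this coroutine on the queue,
             * and re-locks toy_mutex once a producer wakes us. */
            qemu_co_queue_wait(&toy_waiters, &toy_mutex);
        }
        toy_items--;
        qemu_co_mutex_unlock(&toy_mutex);
    }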
/openbmc/linux/arch/powerpc/include/asm/

simple_spinlock.h
    1   /* SPDX-License-Identifier: GPL-2.0-or-later */
    6    * Simple spin lock operations.
    8    * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
    18  #include <linux/kcsan-checks.h>
    22  #include <asm/ppc-opcode.h>
    27  #define LOCK_TOKEN  (*(u32 *)(&get_paca()->lock_token))
    29  #define LOCK_TOKEN  (*(u32 *)(&get_paca()->paca_index))
    35  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    37      return lock.slock == 0;
    40  static inline int arch_spin_is_locked(arch_spinlock_t *lock)
    [all …]
/openbmc/linux/tools/testing/selftests/bpf/progs/

linked_list.c
    1   // SPDX-License-Identifier: GPL-2.0
    15  int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
    24      bpf_spin_lock(lock);
    26      bpf_spin_unlock(lock);
    33      bpf_spin_lock(lock);
    35      bpf_spin_unlock(lock);
    43      bpf_spin_lock(lock);
    44      f->data = 42;
    45      bpf_list_push_front(head, &f->node2);
    46      bpf_spin_unlock(lock);
    [all …]
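The invariant this selftest exercises is that every bpf_list_head
operation must sit inside the bpf_spin_lock()/bpf_spin_unlock() pair
guarding it. A condensed sketch of one such critical section, keeping
the kfunc names and the foo.data/node2 fields from the excerpt;
allocation (bpf_obj_new) and the map/BTF plumbing are omitted, so treat
this as illustrative rather than verifier-ready:

    /* Mutate the node and link it in only while holding the lock the
     * verifier associates with this list. */
    static void push_locked(struct bpf_spin_lock *lock,
                            struct bpf_list_head *head, struct foo *f)
    {
        bpf_spin_lock(lock);
        f->data = 42;                         /* write under the lock */
        bpf_list_push_front(head, &f->node2);
        bpf_spin_unlock(lock);
    }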
/openbmc/linux/tools/perf/tests/shell/

lock_contention.sh
    2   # kernel lock contention analysis test
    3   # SPDX-License-Identifier: GPL-2.0
    5   set -e
    12  rm -f ${perfdata}
    13  rm -f ${result}
    14  trap - EXIT TERM INT
    24  if [ "$(id -u)" != 0 ]; then
    30  if ! perf list | grep -q lock:contention_begin; then
    31      echo "[Skip] No lock contention tracepoints"
    39  echo "Testing perf lock record and perf lock contention"
    [all …]