
Searched full:lock (Results 1 – 25 of 6928) sorted by relevance


/openbmc/linux/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
21 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
24 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
25 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
26 __acquires(lock);
27 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
[all …]
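
The _raw_read_lock*()/_raw_write_lock*() prototypes listed above are the SMP back ends behind the generic read_lock()/write_lock() wrappers. Below is a minimal, hedged sketch of how the public rwlock API is typically used; the identifiers (example_lock, example_value) are hypothetical.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);        /* hypothetical lock */
static int example_value;                  /* data it protects */

/* Any number of readers may hold the lock concurrently. */
static int example_read(void)
{
        int v;

        read_lock(&example_lock);
        v = example_value;
        read_unlock(&example_lock);
        return v;
}

/* A writer needs exclusive access; the _bh/_irq/_irqsave variants
 * declared above additionally disable softirqs or interrupts. */
static void example_write(int v)
{
        write_lock(&example_lock);
        example_value = v;
        write_unlock(&example_lock);
}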
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) argument
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \ argument
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \ argument
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \ argument
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \ argument
37 do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \ argument
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \ argument
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 extern int do_raw_read_trylock(rwlock_t *lock);
34 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
35 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
36 extern int do_raw_write_trylock(rwlock_t *lock);
[all …]
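
rwlock_init() above is the runtime initializer (registering a lockdep class key when CONFIG_DEBUG_LOCK_ALLOC is enabled, as the #lock expansion shows), while do_raw_read_lock() and friends are the debug-aware low-level entry points. A hedged sketch of dynamically initializing an rwlock embedded in a structure; the struct and function names are hypothetical.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_table {                     /* hypothetical container */
        rwlock_t lock;
        unsigned long nr_entries;
};

static struct example_table *example_table_alloc(void)
{
        struct example_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return NULL;
        /* Must run before first use; statically allocated locks can use
         * DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() instead. */
        rwlock_init(&t->lock);
        return t;
}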
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
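
These _raw_spin_lock*() declarations back the public spin_lock() family on SMP builds. A minimal, hedged sketch of the irqsave pattern for data shared between process context and an interrupt handler; all names are illustrative.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);      /* hypothetical */
static unsigned int example_events;

/* Process context: also disable interrupts on this CPU so the handler
 * below cannot interrupt us while we hold the lock. */
static void example_count_from_task(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_events++;
        spin_unlock_irqrestore(&example_lock, flags);
}

/* Hardirq context: interrupts are already disabled here, so the plain
 * spin_lock()/spin_unlock() pair is sufficient. */
static irqreturn_t example_irq(int irq, void *dev_id)
{
        spin_lock(&example_lock);
        example_events++;
        spin_unlock(&example_lock);
        return IRQ_HANDLED;
}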
spinlock_rt.h
10 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
13 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, in __rt_spin_lock_init() argument
23 rt_mutex_base_init(&(slock)->lock); \
31 rt_mutex_base_init(&(slock)->lock); \
35 extern void rt_spin_lock(spinlock_t *lock);
36 extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
37 extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
38 extern void rt_spin_unlock(spinlock_t *lock);
39 extern void rt_spin_lock_unlock(spinlock_t *lock);
40 extern int rt_spin_trylock_bh(spinlock_t *lock);
[all …]
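
spinlock_rt.h is the PREEMPT_RT variant: the initializer above calls rt_mutex_base_init(), so spinlock_t becomes a sleeping, priority-inheriting lock and spin_lock() callers may block. The calling convention is unchanged; a short hedged sketch with hypothetical identifiers.

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);      /* hypothetical */
static LIST_HEAD(example_list);

static void example_add(struct list_head *node)
{
        /* On !PREEMPT_RT this spins with preemption disabled; on
         * PREEMPT_RT it takes the underlying rt_mutex and may sleep,
         * so it must not be used where sleeping is forbidden
         * (e.g. raw hardirq context). */
        spin_lock(&example_lock);
        list_add_tail(node, &example_list);
        spin_unlock(&example_lock);
}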
spinlock.h
72 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
101 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
104 # define raw_spin_lock_init(lock) \ argument
108 __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
112 # define raw_spin_lock_init(lock) \ argument
113 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
116 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) argument
119 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) argument
121 #define raw_spin_is_contended(lock) (((void)(lock), 0)) argument
126 * between program-order earlier lock acquisitions and program-order later
[all …]
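
raw_spin_lock_init(), raw_spin_is_locked() and raw_spin_is_contended() above operate on raw_spinlock_t, which stays a true busy-waiting lock even on PREEMPT_RT. A hedged sketch of the kind of low-level, never-sleeping section where a raw spinlock is the right tool; names are illustrative.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);   /* hypothetical */
static u64 example_hw_state;

static void example_update_hw_state(u64 val)
{
        unsigned long flags;

        /* Always spins, even on PREEMPT_RT, so it is safe in code that
         * must never sleep (core interrupt handling, scheduler paths). */
        raw_spin_lock_irqsave(&example_raw_lock, flags);
        example_hw_state = val;
        raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}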
local_lock.h
8 * local_lock_init - Runtime initialize a lock instance
10 #define local_lock_init(lock) __local_lock_init(lock) argument
13 * local_lock - Acquire a per CPU local lock
14 * @lock: The lock variable
16 #define local_lock(lock) __local_lock(lock) argument
19 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
20 * @lock: The lock variable
22 #define local_lock_irq(lock) __local_lock_irq(lock) argument
25 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
27 * @lock: The lock variable
[all …]
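
Local locks protect per-CPU data: on !PREEMPT_RT the operations above compile down to preemption or interrupt disabling, while on PREEMPT_RT they become real per-CPU sleeping locks that lockdep can check. A hedged usage sketch following the kernel's documented pattern; the struct and variable names are hypothetical.

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct example_pcpu {                      /* hypothetical per-CPU data */
        local_lock_t lock;
        unsigned long count;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void example_account(void)
{
        /* Serialises only against this CPU's own preemption/softirqs;
         * every CPU has its own lock instance and its own data. */
        local_lock(&example_pcpu.lock);
        this_cpu_inc(example_pcpu.count);
        local_unlock(&example_pcpu.lock);
}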
spinlock_up.h
29 static inline void arch_spin_lock(arch_spinlock_t *lock) in arch_spin_lock() argument
31 lock->slock = 0; in arch_spin_lock()
35 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
37 char oldval = lock->slock; in arch_spin_trylock()
39 lock->slock = 0; in arch_spin_trylock()
45 static inline void arch_spin_unlock(arch_spinlock_t *lock) in arch_spin_unlock() argument
48 lock->slock = 1; in arch_spin_unlock()
54 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) argument
55 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) argument
56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) argument
[all …]
/openbmc/linux/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, in __raw_spin_lock_init() argument
21 * Make sure we are not reinitializing a held lock: in __raw_spin_lock_init()
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); in __raw_spin_lock_init()
24 lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner); in __raw_spin_lock_init()
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in __raw_spin_lock_init()
27 lock->magic = SPINLOCK_MAGIC; in __raw_spin_lock_init()
28 lock->owner = SPINLOCK_OWNER_INIT; in __raw_spin_lock_init()
29 lock->owner_cpu = -1; in __raw_spin_lock_init()
35 void __rwlock_init(rwlock_t *lock, const char *name, in __rwlock_init() argument
40 * Make sure we are not reinitializing a held lock: in __rwlock_init()
[all …]
rtmutex_api.c
17 * Debug aware fast / slowpath lock,trylock,unlock
22 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock, in __rt_mutex_lock_common() argument
30 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_); in __rt_mutex_lock_common()
31 ret = __rt_mutex_lock(&lock->rtmutex, state); in __rt_mutex_lock_common()
33 mutex_release(&lock->dep_map, _RET_IP_); in __rt_mutex_lock_common()
45 * rt_mutex_lock_nested - lock a rt_mutex
47 * @lock: the rt_mutex to be locked
50 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) in rt_mutex_lock_nested() argument
52 __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass); in rt_mutex_lock_nested()
56 void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock) in _rt_mutex_lock_nest_lock() argument
[all …]
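
__rt_mutex_lock_common() above is the debug-aware entry point shared by rt_mutex_lock(), rt_mutex_lock_nested() and the interruptible/killable variants: it takes the lockdep annotation, calls the real lock routine and releases the annotation on failure. A minimal hedged sketch of the public API; names are hypothetical.

#include <linux/errno.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_rtm);       /* hypothetical */

static int example_do_work(void)
{
        /* Priority-inheriting mutex: while we block, our priority is
         * lent to the current owner so it cannot be preempted forever. */
        rt_mutex_lock(&example_rtm);
        /* ... critical section ... */
        rt_mutex_unlock(&example_rtm);

        /* The interruptible variant returns -EINTR if a signal arrives
         * while the task is blocked on the lock. */
        if (rt_mutex_lock_interruptible(&example_rtm))
                return -EINTR;
        rt_mutex_unlock(&example_rtm);
        return 0;
}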
rtmutex.c
27 #include <trace/events/lock.h>
36 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
42 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
47 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
52 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
66 * lock->owner state tracking:
68 * lock->owner holds the task_struct pointer of the owner. Bit 0
69 * is used to keep track of the "lock has waiters" state.
72 * NULL 0 lock is free (fast acquire possible)
73 * NULL 1 lock is free and has waiters and the top waiter
[all …]
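
The comment block above describes how lock->owner packs two pieces of state into one word: the owning task's task_struct pointer and, in bit 0, a "lock has waiters" flag, which works because the pointer is always at least word aligned. The sketch below is a hedged, standalone illustration of that pointer-tagging technique, not the kernel's actual code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct task { int pid; };                  /* stand-in for task_struct */

#define HAS_WAITERS 1UL                    /* bit 0 of the owner word */

/* Pack "owner pointer + waiters flag" into one word; bit 0 is free
 * because the pointer is at least 2-byte aligned. */
static uintptr_t encode_owner(const struct task *owner, bool has_waiters)
{
        return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
}

static const struct task *decode_owner(uintptr_t word)
{
        return (const struct task *)(word & ~HAS_WAITERS);
}

int main(void)
{
        struct task t = { .pid = 42 };
        uintptr_t word = encode_owner(&t, true);

        printf("owner pid=%d, has_waiters=%lu\n",
               decode_owner(word)->pid, (unsigned long)(word & HAS_WAITERS));
        return 0;
}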
mutex.c
34 #include <trace/events/lock.h>
46 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init() argument
48 atomic_long_set(&lock->owner, 0); in __mutex_init()
49 raw_spin_lock_init(&lock->wait_lock); in __mutex_init()
50 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
52 osq_lock_init(&lock->osq); in __mutex_init()
55 debug_mutex_init(lock, name, key); in __mutex_init()
60 * @owner: contains: 'struct task_struct *' to the current lock owner,
65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
79 static inline struct task_struct *__mutex_owner(struct mutex *lock) in __mutex_owner() argument
[all …]
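
__mutex_init() above resets the owner word, the internal wait_lock, the waiter list and (with CONFIG_MUTEX_SPIN_ON_OWNER) the optimistic-spin queue; callers normally reach it through mutex_init() or use DEFINE_MUTEX() for static locks. A minimal hedged sketch with hypothetical names.

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);        /* hypothetical */
static int example_config;

static int example_set_config(int val)
{
        /* Sleeping lock: may block while contended, so only usable
         * from process context. */
        if (mutex_lock_interruptible(&example_mutex))
                return -ERESTARTSYS;
        example_config = val;
        mutex_unlock(&example_mutex);
        return 0;
}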
spinlock.c
35 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
46 * Some architectures can relax in favour of the CPU owning the lock.
63 * This could be a long-held lock. We both prepare to spin for a long
65 * towards that other CPU that it should break the lock ASAP.
68 void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
72 if (likely(do_raw_##op##_trylock(lock))) \
76 arch_##op##_relax(&lock->raw_lock); \
80 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
87 if (likely(do_raw_##op##_trylock(lock))) \
92 arch_##op##_relax(&lock->raw_lock); \
[all …]
ww_mutex.h
9 __ww_waiter_first(struct mutex *lock) in __ww_waiter_first() argument
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_first()
14 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_first()
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_next() argument
24 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_next()
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_prev() argument
34 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_prev()
41 __ww_waiter_last(struct mutex *lock) in __ww_waiter_last() argument
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_last()
46 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_last()
[all …]
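
The __ww_waiter_first/next/prev/last helpers above are how the wait/wound mutex code walks a mutex's waiter list when deciding which transaction to wound or back off. From the user's side, the API looks roughly like the hedged sketch below; the class name and the two-lock helper are hypothetical, and the kernel's ww-mutex documentation shows a fuller variant that keeps the contended lock held across the retry.

#include <linux/errno.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);  /* hypothetical */

static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &example_ww_class);
retry:
        ret = ww_mutex_lock(a, &ctx);
        if (ret)
                goto out;
        ret = ww_mutex_lock(b, &ctx);
        if (ret == -EDEADLK) {
                /* We lost the wound/wait arbitration: release what we
                 * hold, wait for the contended lock, then start over. */
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, &ctx);
                ww_mutex_unlock(b);
                goto retry;
        }
        if (ret) {
                ww_mutex_unlock(a);
                goto out;
        }

        ww_acquire_done(&ctx);
        /* ... operate on both objects ... */
        ww_mutex_unlock(a);
        ww_mutex_unlock(b);
        ret = 0;
out:
        ww_acquire_fini(&ctx);
        return ret;
}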
qrwlock.c
15 #include <trace/events/lock.h>
18 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
19 * @lock: Pointer to queued rwlock structure
21 void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock) in queued_read_lock_slowpath() argument
24 * Readers come here when they cannot get the lock without waiting in queued_read_lock_slowpath()
28 * Readers in interrupt context will get the lock immediately in queued_read_lock_slowpath()
29 * if the writer is just waiting (not holding the lock yet), in queued_read_lock_slowpath()
30 * so spin with ACQUIRE semantics until the lock is available in queued_read_lock_slowpath()
33 atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); in queued_read_lock_slowpath()
36 atomic_sub(_QR_BIAS, &lock->cnts); in queued_read_lock_slowpath()
[all …]
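
The slowpath above encodes the qrwlock fairness rule: readers normally queue behind a waiting writer, but a reader already running in interrupt context is granted the lock as soon as no writer actually holds it, which avoids deadlock when an interrupt handler takes the read lock while the task it interrupted still holds it for reading and a writer is queued. At the API level this is still just read_lock()/write_lock(); a hedged sketch with hypothetical names.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_rwlock);      /* rwlock_t is a qrwlock on most arches */
static unsigned long example_stats;

/* Process-context writer: queues fairly and gets exclusive access. */
static void example_update(unsigned long v)
{
        write_lock_irq(&example_rwlock);
        example_stats = v;
        write_unlock_irq(&example_rwlock);
}

/* Interrupt-context reader: per the slowpath above, it may bypass a
 * writer that is only waiting rather than queue behind it. */
static irqreturn_t example_irq(int irq, void *dev_id)
{
        unsigned long v;

        read_lock(&example_rwlock);
        v = example_stats;
        read_unlock(&example_rwlock);
        return v ? IRQ_HANDLED : IRQ_NONE;
}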
/openbmc/linux/drivers/gpu/drm/
drm_lock.c
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock is held, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_take() local
67 old = *lock; in drm_lock_take()
75 prev = cmpxchg(lock, old, new); in drm_lock_take()
82 DRM_ERROR("%d holds heavyweight lock\n", in drm_lock_take()
90 /* Have lock */ in drm_lock_take()
97 * This takes a lock forcibly and hands it to context. Should ONLY be used
[all …]
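
drm_lock_take() above marks the legacy DRM heavyweight lock as held by a given context using a cmpxchg() retry loop on the shared lock word. The sketch below is a hedged, userspace illustration of that compare-and-swap pattern with C11 atomics; it is not the DRM code, and the bit layout is invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000u              /* illustrative "held" flag */

static atomic_uint hw_lock;                /* low bits: owning context id */

/* Try to mark the lock held by the given context; retry while the word
 * changes under us, the usual shape of a cmpxchg acquisition loop. */
static bool lock_take(unsigned int context)
{
        unsigned int old, newval;

        do {
                old = atomic_load(&hw_lock);
                if (old & LOCK_HELD)
                        return false;      /* another context holds it */
                newval = LOCK_HELD | context;
        } while (!atomic_compare_exchange_weak(&hw_lock, &old, newval));

        return true;
}

int main(void)
{
        printf("take(7): %d\n", lock_take(7));   /* 1: acquired */
        printf("take(9): %d\n", lock_take(9));   /* 0: already held */
        return 0;
}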
/openbmc/openbmc-test-automation/openpower/ext_interfaces/
test_lock_management.robot
3 Documentation Test lock management feature of management console on BMC.
28 Acquire Read Write Lock
31 [Template] Acquire Lock On Resource
42 Acquire Read Lock On Read Lock
43 [Documentation] Acquire read lock on another read lock.
45 [Template] Acquire Lock On Another Lock
51 Fail To Acquire Lock On Another Lock
52 [Documentation] Fail to acquire another lock.
54 [Template] Verify Acquire Lock Fails On Another Lock
62 Acquire Lock After Reboot
[all …]
/openbmc/linux/fs/ocfs2/dlm/
dlmast.c
35 struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_should_cancel_bast() argument
50 assert_spin_locked(&lock->spinlock); in dlm_should_cancel_bast()
52 if (lock->ml.highest_blocked == LKM_IVMODE) in dlm_should_cancel_bast()
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); in dlm_should_cancel_bast()
[all …]
dlmlock.c
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock) in dlm_can_grant_new_lock() argument
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) in dlm_can_grant_new_lock()
87 lock->ml.type)) in dlm_can_grant_new_lock()
94 /* performs lock creation at the lockres master site
[all …]
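
dlm_can_grant_new_lock() above walks the resource's existing lock queues and refuses the new request if any queued lock's mode is incompatible with the requested mode (dlm_lock_compatible()). The standalone sketch below illustrates the general DLM-style compatibility check for three common modes; the table is a textbook simplification, not the ocfs2 matrix.

#include <stdbool.h>
#include <stdio.h>

enum lock_mode { MODE_NL, MODE_PR, MODE_EX };   /* null, protected-read, exclusive */

/* compatible[held][requested]: may the requested mode be granted
 * while a lock in the held mode is already granted? */
static const bool compatible[3][3] = {
        /*            NL     PR     EX    */
        /* NL */    { true,  true,  true  },
        /* PR */    { true,  true,  false },
        /* EX */    { true,  false, false },
};

static bool can_grant(const enum lock_mode *held, int nr_held,
                      enum lock_mode requested)
{
        for (int i = 0; i < nr_held; i++)
                if (!compatible[held[i]][requested])
                        return false;
        return true;
}

int main(void)
{
        enum lock_mode held[] = { MODE_PR, MODE_PR };

        printf("grant PR: %d\n", can_grant(held, 2, MODE_PR));  /* 1 */
        printf("grant EX: %d\n", can_grant(held, 2, MODE_EX));  /* 0 */
        return 0;
}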
dlmconvert.c
5 * underlying calls for lock conversion
38 * only one that holds a lock on exit (res->spinlock).
43 struct dlm_lock *lock, int flags,
48 struct dlm_lock *lock, int flags, int type);
61 struct dlm_lock *lock, int flags, int type) in dlmconvert_master() argument
72 status = __dlmconvert_master(dlm, res, lock, flags, type, in dlmconvert_master()
83 dlm_queue_ast(dlm, lock); in dlmconvert_master()
93 /* performs lock conversion at the lockres master site
96 * taken: takes and drops lock->spinlock
99 * call_ast: whether ast should be called for this lock
[all …]
/openbmc/linux/include/asm-generic/
qrwlock.h
3 * Queue read/write lock
28 #define _QW_LOCKED 0x0ff /* A writer holds the lock */
36 extern void queued_read_lock_slowpath(struct qrwlock *lock);
37 extern void queued_write_lock_slowpath(struct qrwlock *lock);
40 * queued_read_trylock - try to acquire read lock of a queued rwlock
41 * @lock : Pointer to queued rwlock structure
42 * Return: 1 if lock acquired, 0 if failed
44 static inline int queued_read_trylock(struct qrwlock *lock) in queued_read_trylock() argument
48 cnts = atomic_read(&lock->cnts); in queued_read_trylock()
50 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); in queued_read_trylock()
[all …]
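
queued_read_trylock() above speculatively adds _QR_BIAS to the combined reader/writer word and backs the increment out again if a writer turns out to hold the lock. The standalone sketch below models that fast path with C11 atomics; the constants are simplified for illustration and do not match the kernel's exact encoding.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QW_LOCKED 0x0ffu                   /* low byte: a writer holds the lock */
#define QR_BIAS   0x100u                   /* one reader = +0x100 (illustrative) */

static atomic_uint cnts;                   /* combined reader count + writer byte */

/* Optimistic read-trylock: bump the reader count, then undo it if a
 * writer already holds the lock. */
static bool example_read_trylock(void)
{
        unsigned int c = atomic_load(&cnts);

        if (c & QW_LOCKED)
                return false;                      /* writer active, fail fast */

        c = atomic_fetch_add(&cnts, QR_BIAS) + QR_BIAS;
        if (!(c & QW_LOCKED))
                return true;                       /* read hold acquired */

        atomic_fetch_sub(&cnts, QR_BIAS);          /* lost the race: back out */
        return false;
}

static void example_read_unlock(void)
{
        atomic_fetch_sub(&cnts, QR_BIAS);
}

int main(void)
{
        printf("trylock: %d\n", example_read_trylock());   /* 1 */
        example_read_unlock();
        atomic_store(&cnts, QW_LOCKED);                    /* pretend a writer holds it */
        printf("trylock: %d\n", example_read_trylock());   /* 0 */
        return 0;
}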
/openbmc/linux/drivers/md/persistent-data/
dm-block-manager.c
32 * trace is also emitted for the previous lock acquisition.
45 spinlock_t lock; member
61 static unsigned int __find_holder(struct block_lock *lock, in __find_holder() argument
67 if (lock->holders[i] == task) in __find_holder()
74 /* call this *after* you increment lock->count */
75 static void __add_holder(struct block_lock *lock, struct task_struct *task) in __add_holder() argument
77 unsigned int h = __find_holder(lock, NULL); in __add_holder()
83 lock->holders[h] = task; in __add_holder()
86 t = lock->traces + h; in __add_holder()
91 /* call this *before* you decrement lock->count */
[all …]
/openbmc/linux/rust/kernel/sync/
lock.rs
3 //! Generic kernel lock and guard.
5 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
16 /// The "backend" of a lock.
18 /// It is the actual implementation of the lock, without the need to repeat patterns used in all
23 /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
24 /// is owned, that is, between calls to `lock` and `unlock`.
26 /// lock operation.
28 /// The state required by the lock.
31 /// The state required to be kept between lock and unlock.
34 /// Initialises the lock.
[all …]
/openbmc/qemu/util/
qemu-coroutine-lock.c
24 * The lock-free mutex implementation is based on OSv
41 void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock, in qemu_co_queue_wait_impl() argument
51 if (lock) { in qemu_co_queue_wait_impl()
52 qemu_lockable_unlock(lock); in qemu_co_queue_wait_impl()
69 if (lock) { in qemu_co_queue_wait_impl()
70 qemu_lockable_lock(lock); in qemu_co_queue_wait_impl()
74 bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock) in qemu_co_enter_next_impl() argument
84 if (lock) { in qemu_co_enter_next_impl()
85 qemu_lockable_unlock(lock); in qemu_co_enter_next_impl()
88 if (lock) { in qemu_co_enter_next_impl()
[all …]
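
qemu_co_queue_wait_impl() above drops the caller's lock before the coroutine yields and re-takes it after wake-up, the coroutine analogue of a condition-variable wait. The hedged sketch below shows how the public wrappers are typically combined with a CoMutex; names are hypothetical and the wrapper signatures may differ slightly between QEMU versions.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

static CoMutex example_mutex;              /* hypothetical */
static CoQueue example_waiters;
static bool example_ready;

static void example_init(void)
{
        qemu_co_mutex_init(&example_mutex);
        qemu_co_queue_init(&example_waiters);
}

static void coroutine_fn example_consumer(void *opaque)
{
        qemu_co_mutex_lock(&example_mutex);
        while (!example_ready) {
                /* Releases example_mutex while suspended and re-acquires
                 * it before returning, matching the unlock/lock pair in
                 * the _impl above. */
                qemu_co_queue_wait(&example_waiters, &example_mutex);
        }
        qemu_co_mutex_unlock(&example_mutex);
}

static void coroutine_fn example_producer(void *opaque)
{
        qemu_co_mutex_lock(&example_mutex);
        example_ready = true;
        qemu_co_queue_next(&example_waiters);      /* wake one waiter */
        qemu_co_mutex_unlock(&example_mutex);
}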
/openbmc/linux/arch/powerpc/include/asm/
simple_spinlock.h
6 * Simple spin lock operations.
35 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) in arch_spin_value_unlocked() argument
37 return lock.slock == 0; in arch_spin_value_unlocked()
40 static inline int arch_spin_is_locked(arch_spinlock_t *lock) in arch_spin_is_locked() argument
42 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
46 * This returns the old value in the lock, so we succeeded
47 * in getting the lock if the return value is 0.
49 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) in __arch_spin_trylock() argument
64 : "r" (token), "r" (&lock->slock), [eh] "n" (eh) in __arch_spin_trylock()
70 static inline int arch_spin_trylock(arch_spinlock_t *lock) in arch_spin_trylock() argument
[all …]
