Lines matching "mutex" in kernel/locking/mutex.c

3  * kernel/locking/mutex.c
19 * Also see Documentation/locking/mutex-design.rst.
21 #include <linux/mutex.h>
37 #include "mutex.h"
46 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init()
77 * DO NOT USE (outside of mutex code).
79 static inline struct task_struct *__mutex_owner(struct mutex *lock) in __mutex_owner()
89 bool mutex_is_locked(struct mutex *lock) in mutex_is_locked()
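A brief usage note (names below are hypothetical): a mutex is either defined statically with DEFINE_MUTEX() or set up at runtime with mutex_init(), and mutex_is_locked() is handy for sanity checks:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);           /* statically defined and initialized */

	struct demo_dev {
		struct mutex lock;
	};

	static void demo_dev_setup(struct demo_dev *d)
	{
		mutex_init(&d->lock);             /* runtime initialization */
		WARN_ON(mutex_is_locked(&d->lock)); /* freshly initialized, so unlocked */
	}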
103 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff) in __mutex_trylock_common()
142 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff) in __mutex_trylock_or_handoff()
150 static inline bool __mutex_trylock(struct mutex *lock) in __mutex_trylock()
166 static __always_inline bool __mutex_trylock_fast(struct mutex *lock) in __mutex_trylock_fast()
177 static __always_inline bool __mutex_unlock_fast(struct mutex *lock) in __mutex_unlock_fast()
185 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) in __mutex_set_flag()
190 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) in __mutex_clear_flag()
195 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_waiter_is_first()
205 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, in __mutex_add_waiter()
216 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_remove_waiter()
231 static void __mutex_handoff(struct mutex *lock, struct task_struct *task) in __mutex_handoff()
253 * We split the mutex lock/unlock logic into separate fastpath and slowpath functions, to reduce the register pressure on the fastpath.
258 static void __sched __mutex_lock_slowpath(struct mutex *lock);
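The lock word is lock->owner (an atomic_long_t in current kernels): 0 means unlocked, otherwise it holds the owning task_struct pointer with the low bits reserved for flags such as handoff. An uncontended acquire is therefore a single acquire-ordered cmpxchg; a minimal sketch mirroring __mutex_trylock_fast():

	/* Sketch of the fastpath pattern: acquire by atomically replacing a
	 * zero owner word with the current task pointer. Fails (and falls
	 * back to the slowpath) if any owner or flag bit is set. */
	static __always_inline bool demo_trylock_fast(struct mutex *lock)
	{
		unsigned long zero = 0UL;

		return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero,
						       (unsigned long)current);
	}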
261 * mutex_lock - acquire the mutex
262 * @lock: the mutex to be acquired
264 * Lock the mutex exclusively for this task. If the mutex is not available right now, it will sleep until it can get it.
267 * The mutex must later on be released by the same task that acquired it. Recursive locking is not allowed. The task
269 * may not exit without first unlocking the mutex. Also, kernel
270 * memory where the mutex resides must not be freed with
271 * the mutex still locked. The mutex must first be initialized (or statically defined) before it can be locked;
273 * memset()-ing the mutex to 0 is not allowed.
281 void __sched mutex_lock(struct mutex *lock) in mutex_lock()
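The canonical caller pattern is a plain sleeping critical section; a hypothetical example protecting a list (list helpers from <linux/list.h>):

	static LIST_HEAD(demo_list);
	static DEFINE_MUTEX(demo_list_lock);

	static void demo_add(struct demo_item *item)
	{
		mutex_lock(&demo_list_lock);  /* may sleep; never from interrupt context */
		list_add_tail(&item->node, &demo_list);
		mutex_unlock(&demo_list_lock);
	}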
298 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) in __mutex_trylock_or_owner()
304 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in ww_mutex_spin_on_owner()
352 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner()
390 * Initial check for entering the mutex spinning loop
392 static inline int mutex_can_spin_on_owner(struct mutex *lock) in mutex_can_spin_on_owner()
412 * If lock->owner is not set, the mutex has been released. Return true such that we'll trylock in the spin path, which is a faster option than the blocking slow path. in mutex_can_spin_on_owner()
427 * The mutex spinners are queued up using MCS lock so that only one
428 * spinner can compete for the mutex. However, if mutex spinning isn't going to happen, there is no point in going through the lock/unlock overhead.
441 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin()
456 * In order to avoid a stampede of mutex spinners trying to in mutex_optimistic_spin()
457 * acquire the mutex all at once, the spinners need to take a MCS (queued) lock first before spinning on the owner field. in mutex_optimistic_spin()
467 /* Try to acquire the mutex... */ in mutex_optimistic_spin()
501 * reschedule now, before we try-lock the mutex. This avoids getting in mutex_optimistic_spin()
502 * scheduled out right after we obtained the mutex. in mutex_optimistic_spin()
517 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin()
524 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
527 * mutex_unlock - release the mutex
528 * @lock: the mutex to be released
530 * Unlock a mutex that has been locked by this task previously.
533 * of an unlocked mutex is not allowed.
537 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock()
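In recent kernels that provide <linux/cleanup.h>, the unlock can also be issued automatically at scope exit via the mutex guard, which removes whole classes of forgotten-unlock bugs on error paths. A sketch, assuming a kernel with guard() and a hypothetical demo_dev with a value field:

	static int demo_update(struct demo_dev *d, int val)
	{
		guard(mutex)(&d->lock);       /* mutex_unlock() runs at scope exit */

		if (val < 0)
			return -EINVAL;       /* unlocked automatically here too */
		d->value = val;
		return 0;
	}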
548 * ww_mutex_unlock - release the w/w mutex
549 * @lock: the mutex to be released
551 * Unlock a mutex that has been locked by this task previously with any of the
552 * ww_mutex_lock* functions (with or without an acquire context). It is forbidden to release the locks after releasing the acquire context.
556 * of an unlocked mutex is not allowed.
566 * Lock a mutex (possibly interruptible), slowpath:
569 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, in __mutex_lock_common()
744 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, in __mutex_lock()
751 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, in __ww_mutex_lock()
758 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
759 * @ww: mutex to lock
762 * Trylocks a mutex with the optional acquire context; no deadlock detection is
763 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
768 * A mutex acquired with this function must be released with ww_mutex_unlock.
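A sketch of the wait/wound protocol these helpers support, following Documentation/locking/ww-mutex-design.rst with hypothetical names: on -EDEADLK the loser drops what it holds, sleeps on the contended lock with ww_mutex_lock_slow(), and retries in the new order. Note the non-interruptible ww_mutex_lock() can only return -EDEADLK once the context already holds another lock, so the first acquisition of each round never fails:

	static DEFINE_WW_CLASS(demo_ww_class);

	static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;

		ww_acquire_init(&ctx, &demo_ww_class);

		ww_mutex_lock(a, &ctx);   /* nothing held yet: cannot return -EDEADLK */
		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
			/* Wounded: back off, sleep on the contended lock,
			 * then retry with the acquisition order swapped. */
			ww_mutex_unlock(a);
			ww_mutex_lock_slow(b, &ctx);
			swap(a, b);
		}

		ww_acquire_done(&ctx);
		/* ... both objects locked ... */
		ww_mutex_unlock(a);
		ww_mutex_unlock(b);
		ww_acquire_fini(&ctx);
		return 0;
	}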
797 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested()
805 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock()
812 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested()
819 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested()
826 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_io_nested()
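The _nested variants exist for lockdep: when two mutexes of the same lock class are legitimately held together, the inner acquisition is annotated with a subclass so lockdep does not report it as recursive locking. A hypothetical parent/child sketch (SINGLE_DEPTH_NESTING is subclass 1):

	static void demo_lock_parent_child(struct demo_node *parent,
					   struct demo_node *child)
	{
		mutex_lock(&parent->lock);                             /* subclass 0 */
		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING); /* subclass 1 */

		/* ... both nodes locked; same class, distinct subclasses ... */

		mutex_unlock(&child->lock);
		mutex_unlock(&parent->lock);
	}

Without CONFIG_DEBUG_LOCK_ALLOC these map straight to the plain lock calls, so the annotation costs nothing in production builds.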
901 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) in __mutex_unlock_slowpath()
960 __mutex_lock_killable_slowpath(struct mutex *lock);
963 __mutex_lock_interruptible_slowpath(struct mutex *lock);
966 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
967 * @lock: The mutex to be acquired.
969 * Lock the mutex like mutex_lock(). If a signal is delivered while the
970 * process is sleeping, this function will return without acquiring the
971 * mutex.
977 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible()
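The typical caller, e.g. a hypothetical ioctl path, propagates the error instead of sleeping through the signal:

	static long demo_ioctl_op(struct demo_dev *d)
	{
		int ret;

		ret = mutex_lock_interruptible(&d->lock);
		if (ret)
			return ret;   /* -EINTR: a signal arrived while sleeping */

		/* ... critical section ... */
		mutex_unlock(&d->lock);
		return 0;
	}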
990 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
991 * @lock: The mutex to be acquired.
993 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
994 * the current process is delivered while the process is sleeping, this
995 * function will return without acquiring the mutex.
1001 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable()
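mutex_lock_killable() is the middle ground: plain mutex_lock() ignores signals entirely, the interruptible variant bails on any signal, while the killable variant gives up only for fatal signals, so a dying task is not left blocked on the lock. A sketch with hypothetical names:

	static int demo_read_locked(struct demo_dev *d)
	{
		if (mutex_lock_killable(&d->lock))
			return -EINTR;   /* task is being killed; don't block it */

		/* ... copy state out under the lock ... */
		mutex_unlock(&d->lock);
		return 0;
	}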
1013 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1014 * @lock: The mutex to be acquired.
1016 * Lock the mutex like mutex_lock(). While the task is waiting for this
1017 * mutex, it will be accounted as being in the IO wait state by the scheduler.
1022 void __sched mutex_lock_io(struct mutex *lock) in mutex_lock_io()
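mutex_lock_io() only changes accounting: the sleep is charged as iowait, which keeps tools like iostat and vmstat honest when the lock is routinely held across storage operations. Usage is otherwise identical (hypothetical example):

	static void demo_commit(struct demo_fs *fs)
	{
		mutex_lock_io(&fs->commit_lock); /* time spent waiting counts as iowait */
		/* ... issue and wait for journal I/O ... */
		mutex_unlock(&fs->commit_lock);
	}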
1033 __mutex_lock_slowpath(struct mutex *lock) in __mutex_lock_slowpath()
1039 __mutex_lock_killable_slowpath(struct mutex *lock) in __mutex_lock_killable_slowpath()
1045 __mutex_lock_interruptible_slowpath(struct mutex *lock) in __mutex_lock_interruptible_slowpath()
1068 * mutex_trylock - try to acquire the mutex, without waiting
1069 * @lock: the mutex to be acquired
1071 * Try to acquire the mutex atomically. Returns 1 if the mutex
1072 * has been acquired successfully, and 0 on contention.
1078 * This function must not be used in interrupt context. The
1079 * mutex must be released by the same task that acquired it.
1081 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock()
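Because it never sleeps, mutex_trylock() suits opportunistic work where skipping is acceptable; e.g. a hypothetical shrinker that backs off when someone else already holds the lock:

	static void demo_shrink(struct demo_cache *cache)
	{
		if (!mutex_trylock(&cache->lock))
			return;               /* contended: try again later */

		/* ... reclaim under the lock ... */
		mutex_unlock(&cache->lock);
	}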
1130 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1131 * @cnt: the atomic which we are to dec
1132 * @lock: the mutex to return holding if we dec to 0
1136 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock()
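This helper closes the classic race between dropping the last reference and taking the lock that makes the object findable: the mutex is acquired only when the count actually hits zero, and the caller unlocks after tearing down. A sketch with hypothetical names:

	static void demo_put(struct demo_obj *obj)
	{
		/* Returns true holding demo_registry_lock iff refcnt hit 0. */
		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &demo_registry_lock))
			return;

		list_del(&obj->node);         /* no new lookups can find obj now */
		mutex_unlock(&demo_registry_lock);
		kfree(obj);
	}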