1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21b375dc3SMaarten Lankhorst /*
31b375dc3SMaarten Lankhorst * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
41b375dc3SMaarten Lankhorst *
51b375dc3SMaarten Lankhorst * Original mutex implementation started by Ingo Molnar:
61b375dc3SMaarten Lankhorst *
71b375dc3SMaarten Lankhorst * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
81b375dc3SMaarten Lankhorst *
955f036caSPeter Ziljstra * Wait/Die implementation:
101b375dc3SMaarten Lankhorst * Copyright (C) 2013 Canonical Ltd.
1108295b3bSThomas Hellstrom * Choice of algorithm:
1208295b3bSThomas Hellstrom * Copyright (C) 2018 WMWare Inc.
131b375dc3SMaarten Lankhorst *
141b375dc3SMaarten Lankhorst * This file contains the main data structure and API definitions.
151b375dc3SMaarten Lankhorst */
161b375dc3SMaarten Lankhorst
171b375dc3SMaarten Lankhorst #ifndef __LINUX_WW_MUTEX_H
181b375dc3SMaarten Lankhorst #define __LINUX_WW_MUTEX_H
191b375dc3SMaarten Lankhorst
201b375dc3SMaarten Lankhorst #include <linux/mutex.h>
212408f7a3SPeter Zijlstra #include <linux/rtmutex.h>
221b375dc3SMaarten Lankhorst
/*
 * ww_mutex debugging is wanted whenever the underlying lock's debugging is
 * enabled: plain mutex debugging, or rt_mutex debugging on PREEMPT_RT.
 */
#if defined(CONFIG_DEBUG_MUTEXES) || \
	(defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
#define DEBUG_WW_MUTEXES
#endif

/*
 * On PREEMPT_RT the ww_mutex is built on top of rt_mutex rather than the
 * regular mutex. These wrappers select the matching base lock type and the
 * corresponding init/is_locked helpers so the rest of this header is
 * config-agnostic.
 */
#ifndef CONFIG_PREEMPT_RT
#define WW_MUTEX_BASE mutex
#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k)
#define ww_mutex_base_is_locked(b) mutex_is_locked((b))
#else
#define WW_MUTEX_BASE rt_mutex
#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k)
#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
#endif
37653a5b0bSThomas Gleixner
/**
 * struct ww_class - lock class shared by a set of related w/w mutexes
 * @stamp: class-wide counter; each acquire context draws a unique,
 *         monotonically increasing stamp from it (see ww_acquire_init())
 * @acquire_key: lockdep class key for acquire contexts of this class
 * @mutex_key: lockdep class key for the mutexes of this class
 * @acquire_name: lockdep name for acquire contexts of this class
 * @mutex_name: lockdep name for the mutexes of this class
 * @is_wait_die: 1 when this class uses the wait/die algorithm, 0 for
 *               wound/wait (selected by DEFINE_WD_CLASS()/DEFINE_WW_CLASS())
 */
struct ww_class {
	atomic_long_t stamp;
	struct lock_class_key acquire_key;
	struct lock_class_key mutex_key;
	const char *acquire_name;
	const char *mutex_name;
	unsigned int is_wait_die;
};
461b375dc3SMaarten Lankhorst
/**
 * struct ww_mutex - wound/wait mutex
 * @base: the underlying lock; a regular mutex, or an rt_mutex on PREEMPT_RT
 * @ctx: acquire context currently associated with this mutex, or NULL
 *       (cleared by ww_mutex_init(); managed by the locking code)
 * @ww_class: debug-only back-pointer to the class this mutex was
 *            initialized with
 */
struct ww_mutex {
	struct WW_MUTEX_BASE base;
	struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
	struct ww_class *ww_class;
#endif
};
544f1893ecSThomas Gleixner
/**
 * struct ww_acquire_ctx - per-task context for acquiring a set of w/w mutexes
 * @task: the task that owns this context (set to "current" at init)
 * @stamp: unique, monotonically increasing sequence number drawn from the
 *         class' stamp counter; used to order contending contexts
 * @acquired: number of w/w mutexes currently held through this context
 * @wounded: wound/wait state flag; presumably set when this context must
 *           back off after losing contention — semantics live in
 *           kernel/locking (TODO confirm against ww_mutex.c)
 * @is_wait_die: cached copy of the class' algorithm selector
 * @done_acquire: debug flag, set once ww_acquire_done() has been called
 * @ww_class: debug back-pointer to the owning class
 * @contending_lock: debug record of the lock whose acquisition returned
 *                   -EDEADLK; checked by the ww_mutex_lock_slow*() helpers
 * @dep_map: lockdep map tracking the acquire context as a pseudo-lock
 * @deadlock_inject_interval: pacing of injected -EDEADLK failures for
 *                            CONFIG_DEBUG_WW_MUTEX_SLOWPATH testing
 * @deadlock_inject_countdown: remaining acquisitions until the next
 *                             injected failure (seeded from the stamp)
 */
struct ww_acquire_ctx {
	struct task_struct *task;
	unsigned long stamp;
	unsigned int acquired;
	unsigned short wounded;
	unsigned short is_wait_die;
#ifdef DEBUG_WW_MUTEXES
	unsigned int done_acquire;
	struct ww_class *ww_class;
	void *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned int deadlock_inject_interval;
	unsigned int deadlock_inject_countdown;
#endif
};
741b375dc3SMaarten Lankhorst
/*
 * Static initializer for a struct ww_class. @_is_wait_die selects the
 * deadlock-avoidance algorithm: 1 for wait/die, 0 for wound/wait.
 */
#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" \
		, .is_wait_die = _is_wait_die }

/* Define a w/w class that uses the wait/die algorithm. */
#define DEFINE_WD_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)

/* Define a w/w class that uses the wound/wait algorithm. */
#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
861b375dc3SMaarten Lankhorst
871b375dc3SMaarten Lankhorst /**
881b375dc3SMaarten Lankhorst * ww_mutex_init - initialize the w/w mutex
891b375dc3SMaarten Lankhorst * @lock: the mutex to be initialized
901b375dc3SMaarten Lankhorst * @ww_class: the w/w class the mutex should belong to
911b375dc3SMaarten Lankhorst *
921b375dc3SMaarten Lankhorst * Initialize the w/w mutex to unlocked state and associate it with the given
935261ced4SWaiman Long * class. Static define macro for w/w mutex is not provided and this function
945261ced4SWaiman Long * is the only way to properly initialize the w/w mutex.
951b375dc3SMaarten Lankhorst *
961b375dc3SMaarten Lankhorst * It is not allowed to initialize an already locked mutex.
971b375dc3SMaarten Lankhorst */
static inline void ww_mutex_init(struct ww_mutex *lock,
				 struct ww_class *ww_class)
{
	/* Initialize the config-specific base lock with the class' lockdep identity. */
	ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
	/* No acquire context is associated with a freshly initialized mutex. */
	lock->ctx = NULL;
#ifdef DEBUG_WW_MUTEXES
	lock->ww_class = ww_class;
#endif
}
1071b375dc3SMaarten Lankhorst
1081b375dc3SMaarten Lankhorst /**
1091b375dc3SMaarten Lankhorst * ww_acquire_init - initialize a w/w acquire context
1101b375dc3SMaarten Lankhorst * @ctx: w/w acquire context to initialize
1111b375dc3SMaarten Lankhorst * @ww_class: w/w class of the context
1121b375dc3SMaarten Lankhorst *
 * Initializes a context to acquire multiple mutexes of the given w/w class.
1141b375dc3SMaarten Lankhorst *
1151b375dc3SMaarten Lankhorst * Context-based w/w mutex acquiring can be done in any order whatsoever within
1161b375dc3SMaarten Lankhorst * a given lock class. Deadlocks will be detected and handled with the
11755f036caSPeter Ziljstra * wait/die logic.
1181b375dc3SMaarten Lankhorst *
1191b375dc3SMaarten Lankhorst * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
1201b375dc3SMaarten Lankhorst * result in undetected deadlocks and is so forbidden. Mixing different contexts
1211b375dc3SMaarten Lankhorst * for the same w/w class when acquiring mutexes can also result in undetected
1221b375dc3SMaarten Lankhorst * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
1231b375dc3SMaarten Lankhorst * enabling CONFIG_PROVE_LOCKING.
1241b375dc3SMaarten Lankhorst *
1251b375dc3SMaarten Lankhorst * Nesting of acquire contexts for _different_ w/w classes is possible, subject
1261b375dc3SMaarten Lankhorst * to the usual locking rules between different lock classes.
1271b375dc3SMaarten Lankhorst *
1281b375dc3SMaarten Lankhorst * An acquire context must be released with ww_acquire_fini by the same task
1291b375dc3SMaarten Lankhorst * before the memory is freed. It is recommended to allocate the context itself
1301b375dc3SMaarten Lankhorst * on the stack.
1311b375dc3SMaarten Lankhorst */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	/* Draw a unique, monotonically increasing stamp from the class counter. */
	ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
	ctx->acquired = 0;
	ctx->wounded = false;
	ctx->is_wait_die = ww_class->is_wait_die;
#ifdef DEBUG_WW_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Catch contexts placed in memory that was freed while "locked". */
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	/* Track the acquire context as a pseudo-lock in lockdep. */
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	/* Stagger injected -EDEADLK failures using the stamp's low bits. */
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
1561b375dc3SMaarten Lankhorst
1571b375dc3SMaarten Lankhorst /**
1581b375dc3SMaarten Lankhorst * ww_acquire_done - marks the end of the acquire phase
1591b375dc3SMaarten Lankhorst * @ctx: the acquire context
1601b375dc3SMaarten Lankhorst *
1611b375dc3SMaarten Lankhorst * Marks the end of the acquire phase, any further w/w mutex lock calls using
1621b375dc3SMaarten Lankhorst * this context are forbidden.
1631b375dc3SMaarten Lankhorst *
1641b375dc3SMaarten Lankhorst * Calling this function is optional, it is just useful to document w/w mutex
 * code and clearly designate the acquire phase from actually using the locked
1661b375dc3SMaarten Lankhorst * data structures.
1671b375dc3SMaarten Lankhorst */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef DEBUG_WW_MUTEXES
	lockdep_assert_held(ctx);

	/* Calling ww_acquire_done() twice on the same context is a caller bug. */
	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}
1771b375dc3SMaarten Lankhorst
1781b375dc3SMaarten Lankhorst /**
1791b375dc3SMaarten Lankhorst * ww_acquire_fini - releases a w/w acquire context
1801b375dc3SMaarten Lankhorst * @ctx: the acquire context to free
1811b375dc3SMaarten Lankhorst *
1821b375dc3SMaarten Lankhorst * Releases a w/w acquire context. This must be called _after_ all acquired w/w
1831b375dc3SMaarten Lankhorst * mutexes have been released with ww_mutex_unlock.
1841b375dc3SMaarten Lankhorst */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Release the pseudo-lock taken by ww_acquire_init(). */
	mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
	/* All mutexes acquired through this context must already be unlocked. */
	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but fail without anyway
		 */
		ctx->done_acquire = 1;

	if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}
2041b375dc3SMaarten Lankhorst
2051b375dc3SMaarten Lankhorst /**
2061b375dc3SMaarten Lankhorst * ww_mutex_lock - acquire the w/w mutex
2071b375dc3SMaarten Lankhorst * @lock: the mutex to be acquired
2081b375dc3SMaarten Lankhorst * @ctx: w/w acquire context, or NULL to acquire only a single lock.
2091b375dc3SMaarten Lankhorst *
2101b375dc3SMaarten Lankhorst * Lock the w/w mutex exclusively for this task.
2111b375dc3SMaarten Lankhorst *
2121b375dc3SMaarten Lankhorst * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is available (wait case), or select the current
 * context for backing off by returning -EDEADLK (die case). Trying to acquire the
2161b375dc3SMaarten Lankhorst * same lock with the same context twice is also detected and signalled by
2171b375dc3SMaarten Lankhorst * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
2181b375dc3SMaarten Lankhorst *
21955f036caSPeter Ziljstra * In the die case the caller must release all currently held w/w mutexes for
2201b375dc3SMaarten Lankhorst * the given context and then wait for this contending lock to be available by
2211b375dc3SMaarten Lankhorst * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
2221b375dc3SMaarten Lankhorst * lock and proceed with trying to acquire further w/w mutexes (e.g. when
2231b375dc3SMaarten Lankhorst * scanning through lru lists trying to free resources).
2241b375dc3SMaarten Lankhorst *
2251b375dc3SMaarten Lankhorst * The mutex must later on be released by the same task that
2261b375dc3SMaarten Lankhorst * acquired it. The task may not exit without first unlocking the mutex. Also,
2271b375dc3SMaarten Lankhorst * kernel memory where the mutex resides must not be freed with the mutex still
2281b375dc3SMaarten Lankhorst * locked. The mutex must first be initialized (or statically defined) before it
2291b375dc3SMaarten Lankhorst * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
2301b375dc3SMaarten Lankhorst * of the same w/w lock class as was used to initialize the acquire context.
2311b375dc3SMaarten Lankhorst *
2321b375dc3SMaarten Lankhorst * A mutex acquired with this function must be released with ww_mutex_unlock.
2331b375dc3SMaarten Lankhorst */
23423b19ec3SIngo Molnar extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
2351b375dc3SMaarten Lankhorst
2361b375dc3SMaarten Lankhorst /**
2371b375dc3SMaarten Lankhorst * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
2381b375dc3SMaarten Lankhorst * @lock: the mutex to be acquired
2391b375dc3SMaarten Lankhorst * @ctx: w/w acquire context
2401b375dc3SMaarten Lankhorst *
2411b375dc3SMaarten Lankhorst * Lock the w/w mutex exclusively for this task.
2421b375dc3SMaarten Lankhorst *
2431b375dc3SMaarten Lankhorst * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is available (wait case), or select the current
 * context for backing off by returning -EDEADLK (die case). Trying to acquire the
2471b375dc3SMaarten Lankhorst * same lock with the same context twice is also detected and signalled by
2481b375dc3SMaarten Lankhorst * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
2491b375dc3SMaarten Lankhorst * signal arrives while waiting for the lock then this function returns -EINTR.
2501b375dc3SMaarten Lankhorst *
25155f036caSPeter Ziljstra * In the die case the caller must release all currently held w/w mutexes for
2521b375dc3SMaarten Lankhorst * the given context and then wait for this contending lock to be available by
2531b375dc3SMaarten Lankhorst * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
2541b375dc3SMaarten Lankhorst * not acquire this lock and proceed with trying to acquire further w/w mutexes
2551b375dc3SMaarten Lankhorst * (e.g. when scanning through lru lists trying to free resources).
2561b375dc3SMaarten Lankhorst *
2571b375dc3SMaarten Lankhorst * The mutex must later on be released by the same task that
2581b375dc3SMaarten Lankhorst * acquired it. The task may not exit without first unlocking the mutex. Also,
2591b375dc3SMaarten Lankhorst * kernel memory where the mutex resides must not be freed with the mutex still
2601b375dc3SMaarten Lankhorst * locked. The mutex must first be initialized (or statically defined) before it
2611b375dc3SMaarten Lankhorst * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
2621b375dc3SMaarten Lankhorst * of the same w/w lock class as was used to initialize the acquire context.
2631b375dc3SMaarten Lankhorst *
2641b375dc3SMaarten Lankhorst * A mutex acquired with this function must be released with ww_mutex_unlock.
2651b375dc3SMaarten Lankhorst */
266c5470b22SNicolai Hähnle extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
267c5470b22SNicolai Hähnle struct ww_acquire_ctx *ctx);
2681b375dc3SMaarten Lankhorst
2691b375dc3SMaarten Lankhorst /**
2701b375dc3SMaarten Lankhorst * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
2711b375dc3SMaarten Lankhorst * @lock: the mutex to be acquired
2721b375dc3SMaarten Lankhorst * @ctx: w/w acquire context
2731b375dc3SMaarten Lankhorst *
27455f036caSPeter Ziljstra * Acquires a w/w mutex with the given context after a die case. This function
2751b375dc3SMaarten Lankhorst * will sleep until the lock becomes available.
2761b375dc3SMaarten Lankhorst *
2771b375dc3SMaarten Lankhorst * The caller must have released all w/w mutexes already acquired with the
2781b375dc3SMaarten Lankhorst * context and then call this function on the contended lock.
2791b375dc3SMaarten Lankhorst *
2801b375dc3SMaarten Lankhorst * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
2811b375dc3SMaarten Lankhorst * needs with ww_mutex_lock. Note that the -EALREADY return code from
2821b375dc3SMaarten Lankhorst * ww_mutex_lock can be used to avoid locking this contended mutex twice.
2831b375dc3SMaarten Lankhorst *
2841b375dc3SMaarten Lankhorst * It is forbidden to call this function with any other w/w mutexes associated
2851b375dc3SMaarten Lankhorst * with the context held. It is forbidden to call this on anything else than the
2861b375dc3SMaarten Lankhorst * contending mutex.
2871b375dc3SMaarten Lankhorst *
2881b375dc3SMaarten Lankhorst * Note that the slowpath lock acquiring can also be done by calling
2891b375dc3SMaarten Lankhorst * ww_mutex_lock directly. This function here is simply to help w/w mutex
2901b375dc3SMaarten Lankhorst * locking code readability by clearly denoting the slowpath.
2911b375dc3SMaarten Lankhorst */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * The slowpath is only valid after a die case, i.e. once a prior
	 * lock attempt recorded the contending lock via -EDEADLK.
	 */
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	/*
	 * The return value is deliberately discarded: per the contract
	 * documented above, this call sleeps until the lock is available.
	 */
	(void)ww_mutex_lock(lock, ctx);
}
3021b375dc3SMaarten Lankhorst
3031b375dc3SMaarten Lankhorst /**
3041b375dc3SMaarten Lankhorst * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
3051b375dc3SMaarten Lankhorst * @lock: the mutex to be acquired
3061b375dc3SMaarten Lankhorst * @ctx: w/w acquire context
3071b375dc3SMaarten Lankhorst *
30855f036caSPeter Ziljstra * Acquires a w/w mutex with the given context after a die case. This function
3091b375dc3SMaarten Lankhorst * will sleep until the lock becomes available and returns 0 when the lock has
3101b375dc3SMaarten Lankhorst * been acquired. If a signal arrives while waiting for the lock then this
3111b375dc3SMaarten Lankhorst * function returns -EINTR.
3121b375dc3SMaarten Lankhorst *
3131b375dc3SMaarten Lankhorst * The caller must have released all w/w mutexes already acquired with the
3141b375dc3SMaarten Lankhorst * context and then call this function on the contended lock.
3151b375dc3SMaarten Lankhorst *
3161b375dc3SMaarten Lankhorst * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
3171b375dc3SMaarten Lankhorst * needs with ww_mutex_lock. Note that the -EALREADY return code from
3181b375dc3SMaarten Lankhorst * ww_mutex_lock can be used to avoid locking this contended mutex twice.
3191b375dc3SMaarten Lankhorst *
3201b375dc3SMaarten Lankhorst * It is forbidden to call this function with any other w/w mutexes associated
3211b375dc3SMaarten Lankhorst * with the given context held. It is forbidden to call this on anything else
3221b375dc3SMaarten Lankhorst * than the contending mutex.
3231b375dc3SMaarten Lankhorst *
3241b375dc3SMaarten Lankhorst * Note that the slowpath lock acquiring can also be done by calling
3251b375dc3SMaarten Lankhorst * ww_mutex_lock_interruptible directly. This function here is simply to help
3261b375dc3SMaarten Lankhorst * w/w mutex locking code readability by clearly denoting the slowpath.
3271b375dc3SMaarten Lankhorst */
3281b375dc3SMaarten Lankhorst static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)3291b375dc3SMaarten Lankhorst ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
3301b375dc3SMaarten Lankhorst struct ww_acquire_ctx *ctx)
3311b375dc3SMaarten Lankhorst {
3322408f7a3SPeter Zijlstra #ifdef DEBUG_WW_MUTEXES
3331b375dc3SMaarten Lankhorst DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
3341b375dc3SMaarten Lankhorst #endif
3351b375dc3SMaarten Lankhorst return ww_mutex_lock_interruptible(lock, ctx);
3361b375dc3SMaarten Lankhorst }
3371b375dc3SMaarten Lankhorst
3381b375dc3SMaarten Lankhorst extern void ww_mutex_unlock(struct ww_mutex *lock);
3391b375dc3SMaarten Lankhorst
340*12235da8SMaarten Lankhorst extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
341*12235da8SMaarten Lankhorst struct ww_acquire_ctx *ctx);
3421b375dc3SMaarten Lankhorst
/**
3441b375dc3SMaarten Lankhorst * ww_mutex_destroy - mark a w/w mutex unusable
3451b375dc3SMaarten Lankhorst * @lock: the mutex to be destroyed
3461b375dc3SMaarten Lankhorst *
3471b375dc3SMaarten Lankhorst * This function marks the mutex uninitialized, and any subsequent
3481b375dc3SMaarten Lankhorst * use of the mutex is forbidden. The mutex must not be locked when
3491b375dc3SMaarten Lankhorst * this function is called.
3501b375dc3SMaarten Lankhorst */
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
#ifndef CONFIG_PREEMPT_RT
	/* Nothing to do for the rt_mutex base on PREEMPT_RT. */
	mutex_destroy(&lock->base);
#endif
}
3571b375dc3SMaarten Lankhorst
3581b375dc3SMaarten Lankhorst /**
3591b375dc3SMaarten Lankhorst * ww_mutex_is_locked - is the w/w mutex locked
3601b375dc3SMaarten Lankhorst * @lock: the mutex to be queried
3611b375dc3SMaarten Lankhorst *
3621b375dc3SMaarten Lankhorst * Returns 1 if the mutex is locked, 0 if unlocked.
3631b375dc3SMaarten Lankhorst */
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
	/* Defer to the config-specific base-lock query (mutex or rt_mutex). */
	return ww_mutex_base_is_locked(&lock->base);
}
3681b375dc3SMaarten Lankhorst
3691b375dc3SMaarten Lankhorst #endif
370