// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

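/*
 * Build the rtmutex core in its wound/wait flavour: defining
 * RT_MUTEX_BUILD_MUTEX and WW_RT before textually including rtmutex.c
 * compiles the shared lock/unlock paths with ww_acquire_ctx support.
 */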
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"

int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

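	/*
	 * Fast path: if we get the rtmutex, publish our acquire context
	 * so later contenders can order themselves against us.
	 */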
	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
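		/*
		 * Taking the same ww_mutex twice within one acquire
		 * context is a caller bug; report it rather than
		 * deadlocking on ourselves.
		 */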
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

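		/*
		 * For lockdep, every lock taken in this acquire context
		 * nests under ctx->dep_map.
		 */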
#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

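	/*
	 * Fast path failed: take the slow path, which can return
	 * -EDEADLK when this context gets wounded, or -EINTR for
	 * TASK_INTERRUPTIBLE sleeps.
	 */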
	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

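	/*
	 * Clear the wound/wait context while we still own the lock,
	 * then release lockdep state and the underlying rtmutex.
	 */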
	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
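
/*
 * Minimal caller-side sketch (illustrative only; demo_ww_class, lock_a
 * and lock_b are hypothetical names, not part of this file). Locks are
 * taken under one ww_acquire_ctx; on -EDEADLK the caller drops what it
 * holds, sleeps on the contended lock with ww_mutex_lock_slow(), and
 * retries:
 *
 *	static DEFINE_WW_CLASS(demo_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &demo_ww_class);
 *	ww_mutex_lock(&lock_a, &ctx);
 *	if (ww_mutex_lock(&lock_b, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&lock_a);
 *		ww_mutex_lock_slow(&lock_b, &ctx);
 *		ww_mutex_lock(&lock_a, &ctx);	// may -EDEADLK again;
 *						// real callers loop
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... use both objects ...
 *	ww_mutex_unlock(&lock_b);
 *	ww_mutex_unlock(&lock_a);
 *	ww_acquire_fini(&ctx);
 */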