// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

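/*
 * The ww_mutex-on-rtmutex implementation is not written out here; it is
 * generated by textually including rtmutex.c below with the
 * RT_MUTEX_BUILD_MUTEX and WW_RT build switches defined.
 */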
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"

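/*
 * Trylock with an optional acquire context: returns 1 if the lock was
 * acquired, 0 otherwise. Unlike ww_mutex_lock(), this never sleeps and
 * never returns -EALREADY or -EDEADLK.
 */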
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
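
/*
 * Illustrative sketch (not part of this file): opportunistically take a
 * further lock while already holding others under @ctx, falling back to
 * a blocking path on contention. 'obj' and the -EBUSY fallback are
 * hypothetical.
 *
 *	if (!ww_mutex_trylock(&obj->lock, ctx))
 *		return -EBUSY;
 */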

static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Fast path: an uncontended lock is taken with a single cmpxchg. */
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* On failure, undo the lockdep acquire from mutex_acquire_nest() above. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
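
/*
 * The exported lock functions below are thin wrappers around
 * __ww_rt_mutex_lock() and differ only in the task state used while
 * sleeping in the slowpath.
 */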
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
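
/*
 * Illustrative sketch (not part of this file): the standard wait/wound
 * acquire loop with deadlock backoff. 'my_ww_class', 'a' and 'b' are
 * hypothetical; see Documentation/locking/ww-mutex-design.rst for the
 * canonical patterns.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 * retry:
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		goto retry;	// lost the stamp race; back off and retry
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... use a and b ...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */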
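/*
 * Release order matters: clear the ww_mutex context state while the lock
 * is still held, drop the lockdep dependency, then release the
 * underlying rtmutex.
 */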
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);