/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline and cause
 * cacheline bouncing.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is currently running on a CPU.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
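
/*
 * Layout sketch (illustrative only, not part of this header): when a
 * rwsem is embedded in a larger structure, frequently written fields of
 * that structure are best kept off the rwsem's cacheline, per the comment
 * above. The names below (struct hypothetical_cache, nr_accesses) are
 * made up:
 *
 *	struct hypothetical_cache {
 *		struct rw_semaphore sem;
 *		struct list_head lru;
 *		atomic_long_t nr_accesses ____cacheline_aligned_in_smp;
 *	};
 */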

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != 0;
}

#define RWSEM_UNLOCKED_VALUE		0L
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
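
/*
 * Usage sketch (illustrative only): a rwsem is initialized statically
 * with DECLARE_RWSEM() or at run time with init_rwsem(); dynamically
 * allocated objects must use init_rwsem() so that lockdep gets a valid
 * lock class key. my_static_sem and struct my_object are hypothetical:
 *
 *	static DECLARE_RWSEM(my_static_sem);
 *
 *	struct my_object {
 *		struct rw_semaphore sem;
 *	};
 *
 *	static void my_object_setup(struct my_object *obj)
 *	{
 *		init_rwsem(&obj->sem);
 *	}
 */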

/*
 * This is the same regardless of which rwsem implementation is used.
 * It is just a heuristic meant to be called by somebody already holding
 * the rwsem to see if a waiter of an incompatible type wants access to
 * the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
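
/*
 * Usage sketch (illustrative only): a long-running reader can poll
 * rwsem_is_contended() and briefly drop the lock to let opposing
 * waiters in. The item list and process_one() are hypothetical, and a
 * real implementation must revalidate its iterator after reacquiring
 * the lock:
 *
 *	down_read(&sem);
 *	list_for_each_entry(item, &items, link) {
 *		process_one(item);
 *		if (rwsem_is_contended(&sem)) {
 *			up_read(&sem);
 *			cond_resched();
 *			down_read(&sem);
 *		}
 *	}
 *	up_read(&sem);
 */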

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations,
 * including the RT-specific variant.
 */

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
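
/*
 * Usage sketch (illustrative only): the _interruptible()/_killable()
 * variants return 0 on success and -EINTR if the sleep was broken by a
 * (fatal) signal, hence the __must_check:
 *
 *	int err = down_read_killable(&sem);
 *
 *	if (err)
 *		return err;
 *	... read-side critical section ...
 *	up_read(&sem);
 */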

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);
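
/*
 * Usage sketch (illustrative only): trylock suits paths that have an
 * alternative when the lock is busy. Note the return convention of
 * 1 on success and 0 on failure. do_update() and defer_update() are
 * hypothetical helpers:
 *
 *	if (down_write_trylock(&sem)) {
 *		do_update();
 *		up_write(&sem);
 *	} else {
 *		defer_update();
 *	}
 */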

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
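
/*
 * Usage sketch (illustrative only): the canonical pairing. Any number
 * of readers may hold the lock at once, a writer holds it exclusively,
 * and both sides may sleep in their critical sections:
 *
 *	down_read(&sem);
 *	... read shared data ...
 *	up_read(&sem);
 *
 *	down_write(&sem);
 *	... modify shared data ...
 *	up_write(&sem);
 */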

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))

DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
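
/*
 * Usage sketch (illustrative only): with the cleanup.h based guards
 * above, the unlock runs automatically when the guard goes out of
 * scope, on every return path. struct my_object is hypothetical:
 *
 *	static int read_value(struct my_object *obj)
 *	{
 *		guard(rwsem_read)(&obj->sem);
 *		return obj->value;
 *	}
 */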

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
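
/*
 * Usage sketch (illustrative only): publish data under the write lock,
 * then downgrade so other readers can proceed while this task keeps a
 * read-side reference. setup_data() and consume_data() are hypothetical:
 *
 *	down_write(&sem);
 *	setup_data();
 *	downgrade_write(&sem);
 *	consume_data();
 *	up_read(&sem);
 */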

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
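
/*
 * Usage sketch (illustrative only): taking two locks of the same class
 * in a fixed (here, address) order, and telling lockdep about it with a
 * subclass. a and b are hypothetical objects embedding a rwsem:
 *
 *	if (a > b)
 *		swap(a, b);
 *	down_write(&a->sem);
 *	down_write_nested(&b->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&b->sem);
 *	up_write(&a->sem);
 */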

/*
 * Take/release a lock when the task that releases it is not the one
 * that acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
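
/*
 * Usage sketch (illustrative only): the non-owner variants allow a read
 * lock acquired by one task to be released by another, e.g. across a
 * queued work item. my_wq, my_work and my_worker() are hypothetical:
 *
 *	static void my_submit(void)
 *	{
 *		down_read_non_owner(&sem);
 *		queue_work(my_wq, &my_work);
 *	}
 *
 *	static void my_worker(struct work_struct *work)
 *	{
 *		...
 *		up_read_non_owner(&sem);
 *	}
 */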
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */