xref: /openbmc/linux/kernel/locking/rwbase_rt.c (revision 0ed66cb7)
// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS so readers can use the fast path again
 *  3) Unlock rtmutex to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) Unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock()#3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair. Writers, which should be avoided in
 * RT tasks anyway (think mmap_sem), are subject to the rtmutex
 * priority/DL inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one by one reader boosting/handover mechanism would be
 * major surgery for very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
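
/*
 * For orientation, a minimal sketch (not the authoritative definition,
 * which lives in the rwbase_rt header) of the data this file operates on:
 *
 *	#define READER_BIAS	(1U << 31)
 *	#define WRITER_BIAS	(1U << 30)
 *
 *	struct rwbase_rt {
 *		atomic_t		readers;
 *		struct rt_mutex_base	rtmutex;
 *	};
 *
 * With READER_BIAS in the sign bit, an unlocked or reader-held lock has a
 * negative ->readers value (READER_BIAS plus the number of active readers),
 * a writer trying to acquire it subtracts READER_BIAS so ->readers becomes
 * the plain non-negative reader count, and a fully write locked lock has
 * ->readers == WRITER_BIAS. The exact constants above are an assumption;
 * only the sign conventions matter for the code below.
 */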

static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment the reader count, if rwb->readers < 0, i.e. READER_BIAS
	 * is set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}

static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Allow readers, as long as the writer has not completely
	 * acquired the semaphore for write.
	 */
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Reader1		Reader2		Writer
	 *			down_read()
	 *					down_write()
	 *					rtmutex_lock(m)
	 *					wait()
	 * down_read()
	 * unlock(m->wait_lock)
	 *			up_read()
	 *			wake(Writer)
	 *					lock(m->wait_lock)
	 *					sem->writelocked=true
	 *					unlock(m->wait_lock)
	 *
	 *					up_write()
	 *					sem->writelocked=false
	 *					rtmutex_unlock(m)
	 *			down_read()
	 *					down_write()
	 *					rtmutex_lock(m)
	 *					wait()
	 * rtmutex_lock(m)
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might take an unbounded
	 * amount of time.
	 */

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);
	return ret;
}

static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}
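
/*
 * Illustrative only, not part of this file: the rw_semaphore and rwlock
 * front ends which include this file are expected to funnel their lock
 * paths through rwbase_read_lock() roughly like the sketch below. The
 * wrapper name and the ->rwbase member are assumptions made for the
 * sketch, not taken from this file:
 *
 *	static __always_inline int __down_read_common(struct rw_semaphore *sem,
 *						      int state)
 *	{
 *		return rwbase_read_lock(&sem->rwbase, state);
 *	}
 *
 * The cmpxchg based fast path is tried first; only once a writer has
 * removed the reader BIAS does the call fall back to __rwbase_read_lock().
 */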

static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		wake_up_state(owner, state);

	raw_spin_unlock_irq(&rtm->wait_lock);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}

static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	atomic_add(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
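
/*
 * A worked example of the bias arithmetic above, assuming the
 * READER_BIAS/WRITER_BIAS sketch near the top of the file. At unlock time
 * a write locked rwb has ->readers == WRITER_BIAS, and
 * __rwbase_write_unlock() adds READER_BIAS - bias:
 *
 *	write_unlock:	bias == WRITER_BIAS
 *		WRITER_BIAS + (READER_BIAS - WRITER_BIAS) == READER_BIAS
 *		-> fully released, the reader fast path works again
 *
 *	downgrade:	bias == WRITER_BIAS - 1
 *		WRITER_BIAS + (READER_BIAS - WRITER_BIAS + 1) == READER_BIAS + 1
 *		-> released with current accounted as one active reader
 */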

static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/*
	 * set_current_state() for rw_semaphore
	 * current_save_and_set_rtlock_wait_state() for rwlock
	 */
	rwbase_set_and_save_current_state(state);

	/* Block until all readers have left the critical section. */
	for (; atomic_read(&rwb->readers);) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			__set_current_state(TASK_RUNNING);
			__rwbase_write_unlock(rwb, 0, flags);
			return -EINTR;
		}
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);

		/*
		 * Schedule and wait for the readers to leave the critical
		 * section. The last reader leaving it wakes the waiter.
		 */
		if (atomic_read(&rwb->readers) != 0)
			rwbase_schedule();
		set_current_state(state);
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	}

	atomic_set(&rwb->readers, WRITER_BIAS);
	rwbase_restore_current_state();
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	return 0;
}
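
/*
 * Illustrative state walk-through for rwbase_write_lock(), again assuming
 * the bias sketch from the top of the file and n active readers at entry:
 *
 *	->readers == READER_BIAS + n	reader fast path open
 *	atomic_sub(READER_BIAS)		->readers == n, fast path closed,
 *					readers now take the wait_lock
 *					protected slow path
 *	loop until ->readers == 0	existing readers drain (new ones may
 *					still slip in via the slow path, see
 *					the writer fairness note above); the
 *					last up_read() wakes this writer
 *	atomic_set(WRITER_BIAS)		write locked, slow path readers now
 *					block on the rtmutex
 */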

static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (!atomic_read(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}
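
/*
 * Note on how this file is consumed (a summary, not new API): it is meant
 * to be #included by the RT rw_semaphore and rwlock implementations after
 * they have provided the hooks used above as macros or inlines:
 *
 *	rwbase_rtmutex_lock_state(), rwbase_rtmutex_slowlock_locked(),
 *	rwbase_rtmutex_unlock(), rwbase_rtmutex_trylock(),
 *	rwbase_set_and_save_current_state(), rwbase_restore_current_state(),
 *	rwbase_signal_pending_state(), rwbase_schedule()
 *
 * As the comments above indicate, the rw_semaphore flavour maps the state
 * handling onto set_current_state() and signal handling, e.g.
 * (illustrative definitions, not copied from the real implementation):
 *
 *	#define rwbase_set_and_save_current_state(state)	\
 *		set_current_state(state)
 *	#define rwbase_restore_current_state()			\
 *		__set_current_state(TASK_RUNNING)
 *
 * while the rwlock flavour uses the rtlock wait state save/restore helpers
 * and lets the signal and !ret branches compile out.
 */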