xref: /openbmc/qemu/include/block/graph-lock.h (revision 55abfc1f)
1 /*
2  * Graph lock: rwlock to protect block layer graph manipulations (add/remove
3  * edges and nodes)
4  *
5  *  Copyright (c) 2022 Red Hat
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #ifndef GRAPH_LOCK_H
21 #define GRAPH_LOCK_H
22 
23 #include "qemu/clang-tsa.h"
24 
25 /**
26  * Graph Lock API
27  * This API provides a rwlock used to protect block layer
28  * graph modifications like edge (BdrvChild) and node (BlockDriverState)
29  * addition and removal.
30  * Currently we have 1 writer only, the Main loop, and many
31  * readers, mostly coroutines running in other AioContext thus other threads.
32  *
33  * We distinguish between writer (main loop, under BQL) that modifies the
34  * graph, and readers (all other coroutines running in various AioContext),
35  * that go through the graph edges, reading
36  * BlockDriverState->parents and ->children.
37  *
38  * The writer (main loop) has "exclusive" access, so it first waits for
39  * current read to finish, and then prevents incoming ones from
40  * entering while it has the exclusive access.
41  *
42  * The readers (coroutines in multiple AioContext) are free to
43  * access the graph as long as the writer is not modifying the graph.
44  * In case it is, they go in a CoQueue and sleep until the writer
45  * is done.
46  *
47  * If a coroutine changes AioContext, the counter in the original and new
48  * AioContext are left intact, since the writer does not care where the
49  * reader is, but only whether there is one.
50  * As a result, some AioContexts might have a negative reader count, to
51  * balance the positive count of the AioContext that took the lock.
52  * This also means that when an AioContext is deleted it may have a nonzero
53  * reader count. In that case we transfer the count to a global shared counter
54  * so that the writer is always aware of all readers.
55  */
56 typedef struct BdrvGraphRWlock BdrvGraphRWlock;
57 
58 /* Dummy lock object to use for Thread Safety Analysis (TSA) */
59 typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock {
60 } BdrvGraphLock;
61 
62 extern BdrvGraphLock graph_lock;
63 
64 /*
65  * clang doesn't check consistency in locking annotations between forward
66  * declarations and the function definition. Having the annotation on the
67  * definition, but not the declaration in a header file, may give the reader
68  * a false sense of security because the condition actually remains unchecked
69  * for callers in other source files.
70  *
71  * Therefore, as a convention, for public functions, GRAPH_RDLOCK and
72  * GRAPH_WRLOCK annotations should be present only in the header file.
73  */
74 #define GRAPH_WRLOCK TSA_REQUIRES(graph_lock)
75 #define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock)
76 #define GRAPH_UNLOCKED TSA_EXCLUDES(graph_lock)
77 
78 /*
79  * TSA annotations are not part of function types, so checks are defeated when
80  * using a function pointer. As a workaround, annotate function pointers with
81  * this macro that will require that the lock is at least taken while reading
82  * the pointer. In most cases this is equivalent to actually protecting the
83  * function call.
84  */
85 #define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock)
86 #define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock)
87 #define GRAPH_UNLOCKED_PTR
88 
89 /*
90  * register_aiocontext:
91  * Add AioContext @ctx to the list of AioContext.
92  * This list is used to obtain the total number of readers
93  * currently running the graph.
94  */
95 void register_aiocontext(AioContext *ctx);
96 
97 /*
98  * unregister_aiocontext:
99  * Removes AioContext @ctx from the list of AioContext.
100  */
101 void unregister_aiocontext(AioContext *ctx);
102 
103 /*
104  * bdrv_graph_wrlock:
105  * Start an exclusive write operation to modify the graph. This means we are
106  * adding or removing an edge or a node in the block layer graph. Nobody else
107  * is allowed to access the graph.
108  *
109  * Must only be called from outside bdrv_graph_co_rdlock.
110  *
111  * The wrlock can only be taken from the main loop, with BQL held, as only the
112  * main loop is allowed to modify the graph.
113  *
114  * If @bs is non-NULL, its AioContext is temporarily released.
115  *
116  * This function polls. Callers must not hold the lock of any AioContext other
117  * than the current one and the one of @bs.
118  */
119 void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
120 bdrv_graph_wrlock(BlockDriverState *bs);
121 
122 /*
123  * bdrv_graph_wrunlock:
124  * Write finished, reset global has_writer to 0 and restart
125  * all readers that are waiting.
126  *
127  * If @bs is non-NULL, its AioContext is temporarily released.
128  */
129 void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
130 bdrv_graph_wrunlock(BlockDriverState *bs);
131 
132 /*
133  * bdrv_graph_wrunlock_ctx:
134  * Write finished, reset global has_writer to 0 and restart
135  * all readers that are waiting.
136  *
137  * If @ctx is non-NULL, its lock is temporarily released.
138  */
139 void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
140 bdrv_graph_wrunlock_ctx(AioContext *ctx);
141 
142 /*
143  * bdrv_graph_co_rdlock:
144  * Read the bs graph. This usually means traversing all nodes in
145  * the graph, therefore it can't happen while another thread is
146  * modifying it.
147  * Increases the reader counter of the current aiocontext,
148  * and if has_writer is set, it means that the writer is modifying
149  * the graph, therefore wait in a coroutine queue.
150  * The writer will then wake this coroutine once it is done.
151  *
152  * This lock should be taken from Iothreads (IO_CODE() class of functions)
153  * because it signals the writer that there are some
154  * readers currently running, or waits until the current
155  * write is finished before continuing.
156  * Calling this function from the Main Loop with BQL held
157  * is not necessary, since the Main Loop itself is the only
158  * writer, thus won't be able to read and write at the same time.
159  * The only exception to that is when we can't take the lock in the
160  * function/coroutine itself, and need to delegate to the caller (usually the
161  * main loop) to take it and wait until the coroutine ends, so that
162  * we always signal that a reader is running.
163  */
164 void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
165 bdrv_graph_co_rdlock(void);
166 
167 /*
168  * bdrv_graph_rdunlock:
169  * Read terminated, decrease the count of readers in the current aiocontext.
170  * If the writer is waiting for reads to finish (has_writer == 1), signal
171  * the writer that we are done via aio_wait_kick() to let it continue.
172  */
173 void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
174 bdrv_graph_co_rdunlock(void);
175 
176 /*
177  * bdrv_graph_rd{un}lock_main_loop:
178  * Just a placeholder to mark where the graph rdlock should be taken
179  * in the main loop. It is just asserting that we are not
180  * in a coroutine and in GLOBAL_STATE_CODE.
181  */
182 void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
183 bdrv_graph_rdlock_main_loop(void);
184 
185 void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
186 bdrv_graph_rdunlock_main_loop(void);
187 
188 /*
189  * assert_bdrv_graph_readable:
190  * Make sure that the reader is either the main loop,
191  * or there is at least one reader holding the rdlock.
192  * In this way an incoming writer is aware of the read and waits.
193  */
194 void GRAPH_RDLOCK assert_bdrv_graph_readable(void);
195 
196 /*
197  * assert_bdrv_graph_writable:
198  * Make sure that the writer is the main loop and has set @has_writer,
199  * so that incoming readers will pause.
200  */
201 void GRAPH_WRLOCK assert_bdrv_graph_writable(void);
202 
203 /*
204  * Calling this function tells TSA that we know that the lock is effectively
205  * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be
206  * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK
207  * macro.
208  */
209 static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
210 assume_graph_lock(void)
211 {
212 }
213 
214 typedef struct GraphLockable { } GraphLockable;
215 
216 /*
217  * In C, compound literals have the lifetime of an automatic variable.
218  * In C++ it would be different, but then C++ wouldn't need QemuLockable
219  * either...
220  */
221 #define GML_OBJ_() (&(GraphLockable) { })
222 
223 /*
224  * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
225  * cleanup attribute and would therefore complain that the graph is never
226  * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
227  * we hold the lock while unlocking is left unchecked.
228  */
229 static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn
230 graph_lockable_auto_lock(GraphLockable *x)
231 {
232     bdrv_graph_co_rdlock();
233     return x;
234 }
235 
236 static inline void TSA_NO_TSA coroutine_fn
237 graph_lockable_auto_unlock(GraphLockable *x)
238 {
239     bdrv_graph_co_rdunlock();
240 }
241 
242 G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock)
243 
244 #define WITH_GRAPH_RDLOCK_GUARD_(var)                                         \
245     for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \
246          var;                                                                 \
247          graph_lockable_auto_unlock(var), var = NULL)
248 
249 #define WITH_GRAPH_RDLOCK_GUARD() \
250     WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))
251 
252 #define GRAPH_RDLOCK_GUARD(x)                                       \
253     g_autoptr(GraphLockable)                                        \
254     glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
255             graph_lockable_auto_lock(GML_OBJ_())
256 
257 
258 typedef struct GraphLockableMainloop { } GraphLockableMainloop;
259 
260 /*
261  * In C, compound literals have the lifetime of an automatic variable.
262  * In C++ it would be different, but then C++ wouldn't need QemuLockable
263  * either...
264  */
265 #define GMLML_OBJ_() (&(GraphLockableMainloop) { })
266 
267 /*
268  * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
269  * cleanup attribute and would therefore complain that the graph is never
270  * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
271  * we hold the lock while unlocking is left unchecked.
272  */
273 static inline GraphLockableMainloop * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
274 graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
275 {
276     bdrv_graph_rdlock_main_loop();
277     return x;
278 }
279 
280 static inline void TSA_NO_TSA
281 graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
282 {
283     bdrv_graph_rdunlock_main_loop();
284 }
285 
286 G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop,
287                               graph_lockable_auto_unlock_mainloop)
288 
289 #define GRAPH_RDLOCK_GUARD_MAINLOOP(x)                              \
290     g_autoptr(GraphLockableMainloop)                                \
291     glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
292             graph_lockable_auto_lock_mainloop(GMLML_OBJ_())
293 
294 #endif /* GRAPH_LOCK_H */
295 
296