/*
 * Graph lock: rwlock to protect block layer graph manipulations (add/remove
 * edges and nodes)
 *
 * Copyright (c) 2022 Red Hat
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef GRAPH_LOCK_H
#define GRAPH_LOCK_H

#include "qemu/clang-tsa.h"

/**
 * Graph Lock API
 * This API provides a rwlock used to protect block layer
 * graph modifications like edge (BdrvChild) and node (BlockDriverState)
 * addition and removal.
 * Currently we have 1 writer only, the main loop, and many
 * readers, mostly coroutines running in other AioContexts, thus other threads.
 *
 * We distinguish between the writer (main loop, under BQL), which modifies the
 * graph, and readers (all other coroutines running in various AioContexts),
 * which traverse the graph edges, reading
 * BlockDriverState ->parents and ->children.
 *
 * The writer (main loop) has "exclusive" access, so it first waits for all
 * current reads to finish, and then prevents incoming ones from
 * entering while it has the exclusive access.
 *
 * The readers (coroutines in multiple AioContexts) are free to
 * access the graph as long as the writer is not modifying it.
 * If it is, they wait in a CoQueue and sleep until the writer
 * is done.
 *
 * If a coroutine changes AioContext, the reader counters in the original and
 * new AioContext are left intact, since the writer does not care where the
 * reader is, but only whether there is one.
 * As a result, some AioContexts might have a negative reader count, to
 * balance the positive count of the AioContext that took the lock.
 * This also means that when an AioContext is deleted it may have a nonzero
 * reader count. In that case we transfer the count to a global shared counter
 * so that the writer is always aware of all readers.
 */
typedef struct BdrvGraphRWlock BdrvGraphRWlock;

/* Dummy lock object to use for Thread Safety Analysis (TSA) */
typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock {
} BdrvGraphLock;

extern BdrvGraphLock graph_lock;

/*
 * clang doesn't check consistency in locking annotations between forward
 * declarations and the function definition. Having the annotation on the
 * definition, but not the declaration in a header file, may give the reader
 * a false sense of security because the condition actually remains unchecked
 * for callers in other source files.
 *
 * Therefore, as a convention, for public functions, GRAPH_RDLOCK and
 * GRAPH_WRLOCK annotations should be present only in the header file.
 */
#define GRAPH_WRLOCK TSA_REQUIRES(graph_lock)
#define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock)
#define GRAPH_UNLOCKED TSA_EXCLUDES(graph_lock)

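/*
 * Usage sketch (the bdrv_example_*() declarations are hypothetical and only
 * illustrate where the annotation goes): a header annotates its declarations
 * so that clang's TSA can check callers in other source files, e.g.
 *
 *   int GRAPH_RDLOCK bdrv_example_count_children(BlockDriverState *bs);
 *   void GRAPH_WRLOCK bdrv_example_remove_child(BdrvChild *child);
 *   int GRAPH_UNLOCKED bdrv_example_open(BlockDriverState *bs, Error **errp);
 */
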
/*
 * TSA annotations are not part of function types, so checks are defeated when
 * using a function pointer. As a workaround, annotate function pointers with
 * this macro that will require that the lock is at least taken while reading
 * the pointer. In most cases this is equivalent to actually protecting the
 * function call.
 */
#define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock)
#define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock)
#define GRAPH_UNLOCKED_PTR

/*
 * register_aiocontext:
 * Add AioContext @ctx to the list of AioContexts.
 * This list is used to obtain the total number of readers
 * currently accessing the graph.
 */
void register_aiocontext(AioContext *ctx);

/*
 * unregister_aiocontext:
 * Remove AioContext @ctx from the list of AioContexts.
 */
void unregister_aiocontext(AioContext *ctx);

/*
 * bdrv_graph_wrlock:
 * Start an exclusive write operation to modify the graph. This means we are
 * adding or removing an edge or a node in the block layer graph. Nobody else
 * is allowed to access the graph.
 *
 * Must only be called from outside bdrv_graph_co_rdlock.
 *
 * The wrlock can only be taken from the main loop, with BQL held, as only the
 * main loop is allowed to modify the graph.
 */
void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
bdrv_graph_wrlock(void);

/*
 * bdrv_graph_wrlock_drained:
 * Similar to bdrv_graph_wrlock, but will begin a drained section before
 * locking.
 */
void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
bdrv_graph_wrlock_drained(void);

/*
 * bdrv_graph_wrunlock:
 * Write finished, reset global has_writer to 0 and restart
 * all readers that are waiting.
 *
 * Also ends the drained section if bdrv_graph_wrlock_drained() was used to
 * lock the graph.
 */
void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
bdrv_graph_wrunlock(void);

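/*
 * Writer-side usage sketch (bdrv_example_detach_child() is a made-up name):
 * graph modifications happen in the main loop, under the BQL, with the graph
 * write-locked for the duration of the change:
 *
 *   bdrv_graph_wrlock();
 *   bdrv_example_detach_child(bs, child);
 *   bdrv_graph_wrunlock();
 */
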
/*
 * bdrv_graph_co_rdlock:
 * Read the bs graph. This usually means traversing all nodes in
 * the graph, therefore it can't happen while another thread is
 * modifying it.
 * Increases the reader counter of the current AioContext,
 * and if has_writer is set, it means that the writer is modifying
 * the graph, therefore wait in a coroutine queue.
 * The writer will then wake this coroutine once it is done.
 *
 * This lock should be taken from IOThreads (IO_CODE() class of functions)
 * because it signals the writer that there are some
 * readers currently running, or waits until the current
 * write is finished before continuing.
 * Calling this function from the main loop with BQL held
 * is not necessary, since the main loop itself is the only
 * writer, thus won't be able to read and write at the same time.
 * The only exception to that is when we can't take the lock in the
 * function/coroutine itself, and need to delegate to the caller (usually the
 * main loop) to take it and wait until the coroutine ends, so that
 * we always signal that a reader is running.
 */
void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
bdrv_graph_co_rdlock(void);

/*
 * bdrv_graph_co_rdunlock:
 * Read terminated, decrease the count of readers in the current AioContext.
 * If the writer is waiting for reads to finish (has_writer == 1), signal
 * the writer that we are done via aio_wait_kick() to let it continue.
 */
void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
bdrv_graph_co_rdunlock(void);

/*
 * bdrv_graph_rd{un}lock_main_loop:
 * Just a placeholder to mark where the graph rdlock should be taken
 * in the main loop. It just asserts that we are not
 * in a coroutine and are in GLOBAL_STATE_CODE.
 */
void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
bdrv_graph_rdlock_main_loop(void);

void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
bdrv_graph_rdunlock_main_loop(void);

/*
 * assert_bdrv_graph_readable:
 * Make sure that the caller is either the main loop,
 * or that there is at least a reader holding the rdlock.
 * In this way an incoming writer is aware of the read and waits.
 */
void GRAPH_RDLOCK assert_bdrv_graph_readable(void);

/*
 * assert_bdrv_graph_writable:
 * Make sure that the writer is the main loop and has set @has_writer,
 * so that incoming readers will pause.
 */
void GRAPH_WRLOCK assert_bdrv_graph_writable(void);

/*
 * Calling this function tells TSA that we know that the lock is effectively
 * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be
 * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK
 * macro.
 */
static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
assume_graph_lock(void)
{
}

typedef struct GraphLockable { } GraphLockable;

/*
 * In C, compound literals have the lifetime of an automatic variable.
 * In C++ it would be different, but then C++ wouldn't need QemuLockable
 * either...
 */
#define GML_OBJ_() (&(GraphLockable) { })

/*
 * graph_lockable_auto_lock() is marked TSA_ACQUIRE_SHARED() so that code
 * inside WITH_GRAPH_RDLOCK_GUARD() and GRAPH_RDLOCK_GUARD() is checked by
 * TSA. Unlocking happens through the cleanup attribute (GRAPH_AUTO_UNLOCK
 * below), which is why graph_lockable_auto_unlock() takes a pointer to the
 * guard variable.
 */
static inline GraphLockable * TSA_ACQUIRE_SHARED(graph_lock) coroutine_fn
graph_lockable_auto_lock(GraphLockable *x)
{
    bdrv_graph_co_rdlock();
    return x;
}

static inline void TSA_RELEASE_SHARED(graph_lock) coroutine_fn
graph_lockable_auto_unlock(GraphLockable **x)
{
    bdrv_graph_co_rdunlock();
}

#define GRAPH_AUTO_UNLOCK __attribute__((cleanup(graph_lockable_auto_unlock)))

/*
 * @var is only used to break the loop after the first iteration.
 * @unlock_var can't be unlocked and then set to NULL because TSA wants the lock
 * to be held at the start of every iteration of the loop.
 */
#define WITH_GRAPH_RDLOCK_GUARD_(var)                                         \
    for (GraphLockable *unlock_var GRAPH_AUTO_UNLOCK =                        \
            graph_lockable_auto_lock(GML_OBJ_()),                             \
         *var = unlock_var;                                                   \
         var;                                                                 \
         var = NULL)

#define WITH_GRAPH_RDLOCK_GUARD() \
    WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))

#define GRAPH_RDLOCK_GUARD(x)                                       \
    GraphLockable * GRAPH_AUTO_UNLOCK                               \
    glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
            graph_lockable_auto_lock(GML_OBJ_())

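/*
 * Reader-side usage sketch in coroutine context (bdrv_example_child_size() is
 * a made-up helper): either protect a bounded block,
 *
 *   WITH_GRAPH_RDLOCK_GUARD() {
 *       QLIST_FOREACH(child, &bs->children, next) {
 *           total += bdrv_example_child_size(child);
 *       }
 *   }
 *
 * or hold the rdlock until the end of the enclosing scope:
 *
 *   GRAPH_RDLOCK_GUARD();
 *
 * A non-coroutine equivalent for main loop code,
 * GRAPH_RDLOCK_GUARD_MAINLOOP(), is defined below.
 */
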
typedef struct GraphLockableMainloop { } GraphLockableMainloop;

/*
 * In C, compound literals have the lifetime of an automatic variable.
 * In C++ it would be different, but then C++ wouldn't need QemuLockable
 * either...
 */
#define GMLML_OBJ_() (&(GraphLockableMainloop) { })

/*
 * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand
 * the cleanup attribute and would therefore complain that the graph is never
 * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
 * we hold the lock while unlocking is left unchecked.
 */
static inline GraphLockableMainloop * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
{
    bdrv_graph_rdlock_main_loop();
    return x;
}

static inline void TSA_NO_TSA
graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
{
    bdrv_graph_rdunlock_main_loop();
}

G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop,
                              graph_lockable_auto_unlock_mainloop)

#define GRAPH_RDLOCK_GUARD_MAINLOOP(x)                              \
    g_autoptr(GraphLockableMainloop)                                \
    glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
            graph_lockable_auto_lock_mainloop(GMLML_OBJ_())

#endif /* GRAPH_LOCK_H */