/*
 * Graph lock: rwlock to protect block layer graph manipulations (add/remove
 * edges and nodes)
 *
 *  Copyright (c) 2022 Red Hat
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef GRAPH_LOCK_H
#define GRAPH_LOCK_H

#include "qemu/osdep.h"

#include "qemu/coroutine.h"

/**
 * Graph Lock API
 * This API provides a rwlock used to protect block layer
 * graph modifications like edge (BdrvChild) and node (BlockDriverState)
 * addition and removal.
 * Currently there is only one writer, the main loop, and many readers,
 * mostly coroutines running in other AioContexts and thus in other threads.
 *
 * We distinguish between the writer (main loop, under BQL), which modifies
 * the graph, and the readers (all other coroutines running in various
 * AioContexts), which traverse the graph edges by reading
 * BlockDriverState ->parents and ->children.
 *
 * The writer (main loop) has exclusive access, so it first waits for all
 * current reads to finish, and then prevents incoming ones from entering
 * while it holds the exclusive access.
 *
 * The readers (coroutines in multiple AioContexts) are free to access the
 * graph as long as the writer is not modifying it. If it is, they enter a
 * CoQueue and sleep until the writer is done.
 *
 * If a coroutine changes AioContext, the counters in the original and new
 * AioContext are left intact, since the writer does not care where the
 * reader is, but only whether there is one.
 * As a result, some AioContexts might have a negative reader count, to
 * balance the positive count of the AioContext that took the lock.
 * This also means that when an AioContext is deleted it may have a nonzero
 * reader count. In that case we transfer the count to a global shared counter
 * so that the writer is always aware of all readers.
 */
typedef struct BdrvGraphRWlock BdrvGraphRWlock;

/*
 * register_aiocontext:
 * Add AioContext @ctx to the list of AioContexts.
 * This list is used to obtain the total number of readers
 * currently traversing the graph.
 */
void register_aiocontext(AioContext *ctx);

/*
 * unregister_aiocontext:
 * Remove AioContext @ctx from the list of AioContexts.
 */
void unregister_aiocontext(AioContext *ctx);

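/*
 * A minimal sketch of the intended pairing (hypothetical setup code, not
 * part of this API): an AioContext is registered before it may run any
 * reader and unregistered before it is destroyed.
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     register_aiocontext(ctx);
 *     ...
 *     unregister_aiocontext(ctx);
 *     aio_context_unref(ctx);
 */
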
/*
 * bdrv_graph_wrlock:
 * Start an exclusive write operation to modify the graph. This means we are
 * adding or removing an edge or a node in the block layer graph. Nobody else
 * is allowed to access the graph.
 *
 * Must only be called from outside bdrv_graph_co_rdlock.
 *
 * The wrlock can only be taken from the main loop, with BQL held, as only the
 * main loop is allowed to modify the graph.
 *
 * This function polls. Callers must not hold the lock of any AioContext other
 * than the current one.
 */
void bdrv_graph_wrlock(void);

/*
 * bdrv_graph_wrunlock:
 * Write finished: reset the global has_writer to 0 and wake up all
 * readers that are waiting.
 */
void bdrv_graph_wrunlock(void);

/*
 * bdrv_graph_co_rdlock:
 * Read the bs graph. This usually means traversing all nodes in
 * the graph, therefore it can't happen while another thread is
 * modifying it.
 * Increases the reader counter of the current AioContext; if has_writer
 * is set, the writer is modifying the graph, so wait in a coroutine
 * queue. The writer will wake this coroutine once it is done.
 *
 * This lock should be taken from iothreads (IO_CODE() class of functions)
 * because it signals the writer that there are some
 * readers currently running, or waits until the current
 * write is finished before continuing.
 * Calling this function from the main loop with BQL held
 * is not necessary, since the main loop itself is the only
 * writer, and thus cannot read and write at the same time.
 * The only exception is when we can't take the lock in the
 * function/coroutine itself, and need to delegate to the caller (usually
 * the main loop) to take it and wait until the coroutine ends, so that
 * we always signal that a reader is running.
 */
void coroutine_fn bdrv_graph_co_rdlock(void);

/*
 * bdrv_graph_co_rdunlock:
 * Read terminated: decrease the count of readers in the current AioContext.
 * If the writer is waiting for reads to finish (has_writer == 1), signal
 * the writer that we are done via aio_wait_kick() to let it continue.
 */
void coroutine_fn bdrv_graph_co_rdunlock(void);

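/*
 * A minimal sketch (walk_parents() is a hypothetical coroutine, not part
 * of this API): a reader brackets its traversal of the graph edges with
 * the rdlock/rdunlock pair so the writer cannot reshape the graph
 * underneath it.
 *
 *     static void coroutine_fn walk_parents(BlockDriverState *bs)
 *     {
 *         BdrvChild *c;
 *
 *         bdrv_graph_co_rdlock();
 *         QLIST_FOREACH(c, &bs->parents, next_parent) {
 *             ...
 *         }
 *         bdrv_graph_co_rdunlock();
 *     }
 */
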
/*
 * bdrv_graph_rd{un}lock_main_loop:
 * Just a placeholder to mark where the graph rdlock should be taken
 * in the main loop. It only asserts that we are not
 * in a coroutine and that we are in GLOBAL_STATE_CODE.
 */
void bdrv_graph_rdlock_main_loop(void);
void bdrv_graph_rdunlock_main_loop(void);

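/*
 * Example (hypothetical GLOBAL_STATE_CODE() caller): the main-loop variants
 * are paired like the coroutine ones, but only assert, since the main loop
 * is the sole writer and cannot race with itself.
 *
 *     GLOBAL_STATE_CODE();
 *     bdrv_graph_rdlock_main_loop();
 *     ... read bs->parents / bs->children ...
 *     bdrv_graph_rdunlock_main_loop();
 */
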
typedef struct GraphLockable { } GraphLockable;

/*
 * In C, compound literals have the lifetime of an automatic variable.
 * In C++ it would be different, but then C++ wouldn't need QemuLockable
 * either...
 */
#define GML_OBJ_() (&(GraphLockable) { })

/*
 * Take the rdlock and return @x, so the guard macros below get a non-NULL
 * value to manage.
 */
static inline GraphLockable *graph_lockable_auto_lock(GraphLockable *x)
{
    bdrv_graph_co_rdlock();
    return x;
}

/* Release the rdlock when the guard variable goes out of scope. */
static inline void graph_lockable_auto_unlock(GraphLockable *x)
{
    bdrv_graph_co_rdunlock();
}

G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock)

#define WITH_GRAPH_RDLOCK_GUARD_(var)                                         \
    for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \
         var;                                                                 \
         graph_lockable_auto_unlock(var), var = NULL)

#define WITH_GRAPH_RDLOCK_GUARD() \
    WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))

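/*
 * Example (hypothetical coroutine body): the guard takes the rdlock on
 * entry to the attached block and drops it when the block is left, even
 * on an early return, thanks to the g_autoptr cleanup.
 *
 *     WITH_GRAPH_RDLOCK_GUARD() {
 *         QLIST_FOREACH(c, &bs->children, next) {
 *             ...
 *         }
 *     }
 */
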
#define GRAPH_RDLOCK_GUARD(x)                                       \
    g_autoptr(GraphLockable)                                        \
    glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
            graph_lockable_auto_lock(GML_OBJ_())


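/*
 * Example (do_graph_read() is a hypothetical coroutine): unlike the WITH_
 * form, GRAPH_RDLOCK_GUARD() holds the rdlock until the end of the
 * enclosing scope, typically the whole function.
 *
 *     static int coroutine_fn do_graph_read(BlockDriverState *bs)
 *     {
 *         GRAPH_RDLOCK_GUARD();
 *         ... the graph is stable until this function returns ...
 *     }
 */
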
typedef struct GraphLockableMainloop { } GraphLockableMainloop;

/*
 * Same compound-literal lifetime trick as GML_OBJ_() above.
 */
#define GMLML_OBJ_() (&(GraphLockableMainloop) { })

static inline GraphLockableMainloop *
graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
{
    bdrv_graph_rdlock_main_loop();
    return x;
}

static inline void
graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
{
    bdrv_graph_rdunlock_main_loop();
}

G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop,
                              graph_lockable_auto_unlock_mainloop)

#define GRAPH_RDLOCK_GUARD_MAINLOOP(x)                              \
    g_autoptr(GraphLockableMainloop)                                \
    glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED =          \
            graph_lockable_auto_lock_mainloop(GMLML_OBJ_())

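/*
 * Example (query_graph() is a hypothetical GLOBAL_STATE_CODE() function):
 * the main-loop analogue of GRAPH_RDLOCK_GUARD(); it asserts the caller's
 * context and marks the read-locked region until the end of the scope.
 *
 *     void query_graph(BlockDriverState *bs)
 *     {
 *         GRAPH_RDLOCK_GUARD_MAINLOOP();
 *         ... read bs->children / bs->parents ...
 *     }
 */
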
#endif /* GRAPH_LOCK_H */