/*
 * QEMU block throttling group infrastructure
 *
 * Copyright (C) Nodalink, EURL. 2014
 * Copyright (C) Igalia, S.L. 2015
 *
 * Authors:
 *   Benoît Canet <benoit.canet@nodalink.com>
 *   Alberto Garcia <berto@igalia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"

/* The ThrottleGroup structure (with its ThrottleState) is shared
 * among different BlockBackends and is independent of the
 * AioContext, so in order to use it from different threads it needs
 * its own locking.
 *
 * This locking is however handled internally in this file, so it's
 * transparent to outside users.
 *
 * The whole ThrottleGroup structure is private and invisible to
 * outside users, who only use it through its ThrottleState.
 *
 * In addition to the ThrottleGroup structure, BlockBackendPublic has
 * fields that need to be accessed by other members of the group and
 * therefore also need to be protected by this lock. Once a
 * BlockBackend is registered in a group those fields can be accessed
 * by other threads at any time.
 *
 * Again, all this is handled internally and is mostly transparent to
 * the outside. The 'throttle_timers' field however has an additional
 * constraint because it may be temporarily invalid (see for example
 * bdrv_set_aio_context()). Therefore in this file a thread will
 * access another BlockBackend's timers only after verifying that
 * that BlockBackend has throttled requests in the queue.
 */
typedef struct ThrottleGroup {
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, BlockBackendPublic) head;
    BlockBackend *tokens[2];
    bool any_timer_armed[2];

    /* These two are protected by the global throttle_groups_lock */
    unsigned refcount;
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
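
/* Note: throughout this file the group is recovered from its embedded
 * ThrottleState via container_of().  A minimal sketch of that pattern
 * (illustrative only; "foo" is a hypothetical group name):
 *
 *     ThrottleState *ts = throttle_group_incref("foo");
 *     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
 *     // tg->name is now "foo"
 */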

static QemuMutex throttle_groups_lock;
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);

/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is
 * created.
 *
 * @name: the name of the ThrottleGroup
 * @ret:  the ThrottleState member of the ThrottleGroup
 */
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;
    ThrottleGroup *iter;

    qemu_mutex_lock(&throttle_groups_lock);

    /* Look for an existing group with that name */
    QTAILQ_FOREACH(iter, &throttle_groups, list) {
        if (!strcmp(name, iter->name)) {
            tg = iter;
            break;
        }
    }

    /* Create a new one if not found */
    if (!tg) {
        tg = g_new0(ThrottleGroup, 1);
        tg->name = g_strdup(name);
        qemu_mutex_init(&tg->lock);
        throttle_init(&tg->ts);
        QLIST_INIT(&tg->head);

        QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    }

    tg->refcount++;

    qemu_mutex_unlock(&throttle_groups_lock);

    return &tg->ts;
}
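
/* A minimal usage sketch of the refcounting pair above (illustrative
 * only; "limits" is a hypothetical group name):
 *
 *     ThrottleState *ts = throttle_group_incref("limits");
 *     // ... use the group through ts ...
 *     throttle_group_unref(ts);   // destroys the group at refcount 0
 */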

/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed.
 *
 * @ts:  The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    qemu_mutex_lock(&throttle_groups_lock);
    if (--tg->refcount == 0) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
        qemu_mutex_destroy(&tg->lock);
        g_free(tg->name);
        g_free(tg);
    }
    qemu_mutex_unlock(&throttle_groups_lock);
}
135 
136 /* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
137  * is guaranteed to remain constant during the lifetime of the group.
138  *
139  * @blk:  a BlockBackend that is member of a throttling group
140  * @ret:  the name of the group.
141  */
142 const char *throttle_group_get_name(BlockBackend *blk)
143 {
144     BlockBackendPublic *blkp = blk_get_public(blk);
145     ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
146     return tg->name;
147 }

/* Return the next BlockBackend in the round-robin sequence, simulating a
 * circular list.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @ret: the next BlockBackend in the sequence
 */
static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);

    if (!next) {
        next = QLIST_FIRST(&tg->head);
    }

    return blk_by_public(next);
}
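
/* For example (illustrative only): if the group's list holds members
 * A, B and C in that order, successive calls walk A -> B -> C and then
 * wrap around to A again, so the list behaves as a ring:
 *
 *     BlockBackend *next = throttle_group_next_blk(a);  // B
 *     next = throttle_group_next_blk(next);             // C
 *     next = throttle_group_next_blk(next);             // back to A
 */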

/*
 * Return whether a BlockBackend has pending requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret:       whether the BlockBackend has pending requests.
 */
static inline bool blk_has_pending_reqs(BlockBackend *blk,
                                        bool is_write)
{
    const BlockBackendPublic *blkp = blk_get_public(blk);
    return blkp->pending_reqs[is_write];
}

/* Return the next BlockBackend in the round-robin sequence with pending I/O
 * requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret:       the next BlockBackend with pending requests, or blk if there is
 *             none.
 */
static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    BlockBackend *token, *start;

    start = token = tg->tokens[is_write];

    /* get the next member in round-robin order */
    token = throttle_group_next_blk(token);
    while (token != start && !blk_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_blk(token);
    }

    /* If no member has I/O queued for scheduling then default to the
     * current BlockBackend, because chances are that it is the one that
     * will get the current request queued.
     */
    if (token == start && !blk_has_pending_reqs(token, is_write)) {
        token = blk;
    }

    /* Either we return the original BB, or one with pending requests */
    assert(token == blk || blk_has_pending_reqs(token, is_write));

    return token;
}
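
/* A worked example (illustrative only): with members A, B and C, the
 * current write token on B, and pending write requests only on A, the
 * walk above visits C (no requests) and then A (has requests), so A
 * becomes the next write token.  If no member had pending requests,
 * the caller's own BlockBackend would be returned instead.
 */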

/* Check if the next I/O request for a BlockBackend needs to be throttled or
 * not. If there's no timer set in this group, set one and update the token
 * accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:        the current BlockBackend
 * @is_write:   the type of operation (read/write)
 * @ret:        whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;

    if (atomic_read(&blkp->io_limits_disabled)) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set blk as the current token */
    if (must_wait) {
        tg->tokens[is_write] = blk;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
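
/* The invariant maintained above: at most one timer per direction is
 * armed in the whole group, on the token's BlockBackend.  E.g. if a
 * write timer is already pending on member A, a write request on
 * member B simply waits (the function returns true without arming a
 * second timer); A's timer callback later schedules B's request.
 */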

/* Start the next pending I/O request for a BlockBackend.  Return whether
 * any request was actually pending.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
                                                         bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    bool ret;

    qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
    ret = qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
    qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);

    return ret;
}

/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static void schedule_next_request(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool must_wait;
    BlockBackend *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(blk, is_write);
    if (!blk_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(blk, is_write)) {
            token = blk;
        } else {
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
            int64_t now = qemu_clock_get_ns(tt->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}

/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round-robin
 * algorithm.
 *
 * @blk:       the current BlockBackend
 * @bytes:     the number of bytes for this I/O
 * @is_write:  the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    BlockBackend *token;

    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(blk, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || blkp->pending_reqs[is_write]) {
        blkp->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
        qemu_co_queue_wait(&blkp->throttled_reqs[is_write],
                           &blkp->throttled_reqs_lock);
        qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        blkp->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(blkp->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(blk, is_write);

    qemu_mutex_unlock(&tg->lock);
}
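
/* A sketch of how a caller typically uses the interceptor above
 * (illustrative only; my_co_pwritev and its arguments are hypothetical,
 * modelled on the throttled I/O paths in block/block-backend.c):
 *
 *     static int coroutine_fn my_co_pwritev(BlockBackend *blk,
 *                                           unsigned int bytes, ...)
 *     {
 *         // Blocks this coroutine until the request may proceed
 *         throttle_group_co_io_limits_intercept(blk, bytes, true);
 *         // ... then issue the actual write ...
 *     }
 */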

typedef struct {
    BlockBackend *blk;
    bool is_write;
} RestartData;

static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    BlockBackend *blk = data->blk;
    bool is_write = data->is_write;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(blk, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(blk, is_write);
        qemu_mutex_unlock(&tg->lock);
    }
}

static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
{
    Coroutine *co;
    RestartData rd = {
        .blk = blk,
        .is_write = is_write
    };

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
    aio_co_enter(blk_get_aio_context(blk), co);
}

void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);

    if (blkp->throttle_state) {
        throttle_group_restart_queue(blk, false);
        throttle_group_restart_queue(blk, true);
    }
}

/* Update the throttle configuration for a particular group. Similar
 * to throttle_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    /* throttle_config() cancels the timers */
    if (timer_pending(tt->timers[0])) {
        tg->any_timer_armed[0] = false;
    }
    if (timer_pending(tt->timers[1])) {
        tg->any_timer_armed[1] = false;
    }
    throttle_config(ts, tt, cfg);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_restart_blk(blk);
}
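
/* A minimal sketch of updating a group's limits through any member
 * (illustrative only; assumes the ThrottleConfig helpers declared in
 * qemu/throttle.h):
 *
 *     ThrottleConfig cfg;
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1024 * 1024;  // 1 MB/s
 *     throttle_group_config(blk, &cfg);  // applies to the whole group
 */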

/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}

/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @blk:       the BlockBackend whose request had been throttled
 * @is_write:  the type of operation (read/write)
 */
static void timer_cb(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(blk, is_write);
}

static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}

/* Register a BlockBackend in the throttling group, also initializing its
 * timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * @blk:       the BlockBackend to insert
 * @groupname: the name of the group
 */
void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
{
    int i;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }

    blkp->throttle_state = ts;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new, set this BlockBackend as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = blk;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);

    throttle_timers_init(&blkp->throttle_timers,
                         blk_get_aio_context(blk),
                         clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         blk);

    qemu_mutex_unlock(&tg->lock);
}
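
/* The registration lifecycle, sketched (illustrative only; "limits" is
 * a hypothetical group name, and the caller must drain any throttled
 * requests before unregistering):
 *
 *     throttle_group_register_blk(blk, "limits");
 *     // ... I/O on blk is now throttled as part of the group ...
 *     throttle_group_unregister_blk(blk);
 */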

/* Unregister a BlockBackend from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The BlockBackend must not have pending throttled requests, so the caller has
 * to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @blk: the BlockBackend to remove
 */
void throttle_group_unregister_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    int i;

    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (tg->tokens[i] == blk) {
            BlockBackend *token = throttle_group_next_blk(blk);
            /* Take care of the case where this is the last blk in the group */
            if (token == blk) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current blk from the list */
    QLIST_REMOVE(blkp, round_robin);
    throttle_timers_destroy(&blkp->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    blkp->throttle_state = NULL;
}

static void throttle_groups_init(void)
{
    qemu_mutex_init(&throttle_groups_lock);
}

block_init(throttle_groups_init);