xref: /openbmc/qemu/block/throttle-groups.c (revision d04bf49c)
1 /*
2  * QEMU block throttling group infrastructure
3  *
4  * Copyright (C) Nodalink, EURL. 2014
5  * Copyright (C) Igalia, S.L. 2015
6  *
7  * Authors:
8  *   Benoît Canet <benoit.canet@nodalink.com>
9  *   Alberto Garcia <berto@igalia.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation; either version 2 or
14  * (at your option) version 3 of the License.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, see <http://www.gnu.org/licenses/>.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "sysemu/block-backend.h"
27 #include "block/throttle-groups.h"
28 #include "qemu/throttle-options.h"
29 #include "qemu/main-loop.h"
30 #include "qemu/queue.h"
31 #include "qemu/thread.h"
32 #include "sysemu/qtest.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-visit-block-core.h"
35 #include "qom/object.h"
36 #include "qom/object_interfaces.h"
37 
38 static void throttle_group_obj_init(Object *obj);
39 static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
40 static void timer_cb(ThrottleGroupMember *tgm, bool is_write);
41 
42 /* The ThrottleGroup structure (with its ThrottleState) is shared
43  * among different ThrottleGroupMembers and it's independent from
44  * AioContext, so in order to use it from different threads it needs
45  * its own locking.
46  *
47  * This locking is however handled internally in this file, so it's
48  * transparent to outside users.
49  *
50  * The whole ThrottleGroup structure is private and invisible to
51  * outside users, that only use it through its ThrottleState.
52  *
53  * In addition to the ThrottleGroup structure, ThrottleGroupMember has
54  * fields that need to be accessed by other members of the group and
55  * therefore also need to be protected by this lock. Once a
56  * ThrottleGroupMember is registered in a group those fields can be accessed
57  * by other threads any time.
58  *
59  * Again, all this is handled internally and is mostly transparent to
60  * the outside. The 'throttle_timers' field however has an additional
61  * constraint because it may be temporarily invalid (see for example
62  * blk_set_aio_context()). Therefore in this file a thread will
63  * access some other ThrottleGroupMember's timers only after verifying that
64  * that ThrottleGroupMember has throttled requests in the queue.
65  */
typedef struct ThrottleGroup {
    Object parent_obj;

    /* refuse individual property change if initialization is complete */
    bool is_initialized;
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, ThrottleGroupMember) head;
    /* Round-robin token per request type: index 0 = read, 1 = write */
    ThrottleGroupMember *tokens[2];
    bool any_timer_armed[2];
    QEMUClockType clock_type;

    /* This field is protected by the global QEMU mutex */
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
83 
/* List of all existing throttle groups.
 * This is protected by the global QEMU mutex */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
87 
88 
89 /* This function reads throttle_groups and must be called under the global
90  * mutex.
91  */
92 static ThrottleGroup *throttle_group_by_name(const char *name)
93 {
94     ThrottleGroup *iter;
95 
96     /* Look for an existing group with that name */
97     QTAILQ_FOREACH(iter, &throttle_groups, list) {
98         if (!g_strcmp0(name, iter->name)) {
99             return iter;
100         }
101     }
102 
103     return NULL;
104 }
105 
106 /* This function reads throttle_groups and must be called under the global
107  * mutex.
108  */
109 bool throttle_group_exists(const char *name)
110 {
111     return throttle_group_by_name(name) != NULL;
112 }
113 
114 /* Increments the reference count of a ThrottleGroup given its name.
115  *
116  * If no ThrottleGroup is found with the given name a new one is
117  * created.
118  *
119  * This function edits throttle_groups and must be called under the global
120  * mutex.
121  *
122  * @name: the name of the ThrottleGroup
123  * @ret:  the ThrottleState member of the ThrottleGroup
124  */
125 ThrottleState *throttle_group_incref(const char *name)
126 {
127     ThrottleGroup *tg = NULL;
128 
129     /* Look for an existing group with that name */
130     tg = throttle_group_by_name(name);
131 
132     if (tg) {
133         object_ref(OBJECT(tg));
134     } else {
135         /* Create a new one if not found */
136         /* new ThrottleGroup obj will have a refcnt = 1 */
137         tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
138         tg->name = g_strdup(name);
139         throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
140     }
141 
142     return &tg->ts;
143 }
144 
/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed (the QOM finalizer also unlinks it from throttle_groups).
 *
 * This function edits throttle_groups and must be called under the global
 * mutex.
 *
 * @ts:  The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    object_unref(OBJECT(tg));
}
160 
/* Get the name from a ThrottleGroupMember's group. The name (and the pointer)
 * is guaranteed to remain constant during the lifetime of the group.
 *
 * @tgm:  a ThrottleGroupMember registered in a group
 * @ret:  the name of the group.
 */
const char *throttle_group_get_name(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    return tg->name;
}
172 
173 /* Return the next ThrottleGroupMember in the round-robin sequence, simulating
174  * a circular list.
175  *
176  * This assumes that tg->lock is held.
177  *
178  * @tgm: the current ThrottleGroupMember
179  * @ret: the next ThrottleGroupMember in the sequence
180  */
181 static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
182 {
183     ThrottleState *ts = tgm->throttle_state;
184     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
185     ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
186 
187     if (!next) {
188         next = QLIST_FIRST(&tg->head);
189     }
190 
191     return next;
192 }
193 
194 /*
195  * Return whether a ThrottleGroupMember has pending requests.
196  *
197  * This assumes that tg->lock is held.
198  *
199  * @tgm:        the ThrottleGroupMember
200  * @is_write:   the type of operation (read/write)
201  * @ret:        whether the ThrottleGroupMember has pending requests.
202  */
203 static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
204                                         bool is_write)
205 {
206     return tgm->pending_reqs[is_write];
207 }
208 
209 /* Return the next ThrottleGroupMember in the round-robin sequence with pending
210  * I/O requests.
211  *
212  * This assumes that tg->lock is held.
213  *
214  * @tgm:       the current ThrottleGroupMember
215  * @is_write:  the type of operation (read/write)
216  * @ret:       the next ThrottleGroupMember with pending requests, or tgm if
217  *             there is none.
218  */
219 static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
220                                                 bool is_write)
221 {
222     ThrottleState *ts = tgm->throttle_state;
223     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
224     ThrottleGroupMember *token, *start;
225 
226     /* If this member has its I/O limits disabled then it means that
227      * it's being drained. Skip the round-robin search and return tgm
228      * immediately if it has pending requests. Otherwise we could be
229      * forcing it to wait for other member's throttled requests. */
230     if (tgm_has_pending_reqs(tgm, is_write) &&
231         atomic_read(&tgm->io_limits_disabled)) {
232         return tgm;
233     }
234 
235     start = token = tg->tokens[is_write];
236 
237     /* get next bs round in round robin style */
238     token = throttle_group_next_tgm(token);
239     while (token != start && !tgm_has_pending_reqs(token, is_write)) {
240         token = throttle_group_next_tgm(token);
241     }
242 
243     /* If no IO are queued for scheduling on the next round robin token
244      * then decide the token is the current tgm because chances are
245      * the current tgm got the current request queued.
246      */
247     if (token == start && !tgm_has_pending_reqs(token, is_write)) {
248         token = tgm;
249     }
250 
251     /* Either we return the original TGM, or one with pending requests */
252     assert(token == tgm || tgm_has_pending_reqs(token, is_write));
253 
254     return token;
255 }
256 
/* Check if the next I/O request for a ThrottleGroupMember needs to be
 * throttled or not. If there's no timer set in this group, set one and update
 * the token accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @tgm:        the current ThrottleGroupMember
 * @is_write:   the type of operation (read/write)
 * @ret:        whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
                                          bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    bool must_wait;

    /* A member with I/O limits disabled (e.g. while being drained) is
     * never throttled */
    if (atomic_read(&tgm->io_limits_disabled)) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        /* Another member already holds the timer: this request must wait */
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set tgm as the current token */
    if (must_wait) {
        tg->tokens[is_write] = tgm;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
294 
295 /* Start the next pending I/O request for a ThrottleGroupMember. Return whether
296  * any request was actually pending.
297  *
298  * @tgm:       the current ThrottleGroupMember
299  * @is_write:  the type of operation (read/write)
300  */
301 static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
302                                                          bool is_write)
303 {
304     bool ret;
305 
306     qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
307     ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
308     qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
309 
310     return ret;
311 }
312 
/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @tgm:       the current ThrottleGroupMember
 * @is_write:  the type of operation (read/write)
 */
static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;
    ThrottleGroupMember *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(tgm, is_write);
    if (!tgm_has_pending_reqs(token, is_write)) {
        /* No member of the group has queued requests of this type */
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current tgm */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(tgm, is_write)) {
            token = tgm;
        } else {
            /* Not in coroutine context, or tgm's own queue was empty:
             * arm token's timer to fire right away, so the queue is
             * restarted from token's own AioContext */
            ThrottleTimers *tt = &token->throttle_timers;
            int64_t now = qemu_clock_get_ns(tg->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        /* Record who got scheduled, for the round-robin rotation */
        tg->tokens[is_write] = token;
    }
}
351 
/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @tgm:       the current ThrottleGroupMember
 * @bytes:     the number of bytes for this I/O
 * @is_write:  the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    ThrottleGroupMember *token;
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(tgm, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || tgm->pending_reqs[is_write]) {
        tgm->pending_reqs[is_write]++;
        /* Drop the group lock while this coroutine is parked so other
         * members can make progress; it is re-taken after wakeup */
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
                           &tgm->throttled_reqs_lock);
        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        tgm->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(tgm->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(tgm, is_write);

    qemu_mutex_unlock(&tg->lock);
}
393 
/* Argument bundle passed to throttle_group_restart_queue_entry() */
typedef struct {
    ThrottleGroupMember *tgm; /* member whose queue is being restarted */
    bool is_write;            /* request type: false = read, true = write */
} RestartData;
398 
/* Coroutine entry point that restarts a member's throttled request queue.
 *
 * @opaque: a RestartData (freed here). The member's restart_pending
 *          counter is decremented when the coroutine finishes.
 */
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    ThrottleGroupMember *tgm = data->tgm;
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool is_write = data->is_write;
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(tgm, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(tgm, is_write);
        qemu_mutex_unlock(&tg->lock);
    }

    g_free(data);

    /* Pair of the atomic_inc() in throttle_group_restart_queue(); lets
     * throttle_group_unregister_tgm() know this coroutine is done */
    atomic_dec(&tgm->restart_pending);
    aio_wait_kick();
}
423 
/* Spawn a coroutine in the member's AioContext that restarts its
 * throttled request queue for the given request type.
 *
 * @tgm:       the ThrottleGroupMember
 * @is_write:  the type of operation (read/write)
 */
static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
{
    Coroutine *co;
    RestartData *rd = g_new0(RestartData, 1);

    rd->tgm = tgm;
    rd->is_write = is_write;

    /* This function is called when a timer is fired or when
     * throttle_group_restart_tgm() is called. Either way, there can
     * be no timer pending on this tgm at this point */
    assert(!timer_pending(tgm->throttle_timers.timers[is_write]));

    /* Balanced by atomic_dec() in throttle_group_restart_queue_entry() */
    atomic_inc(&tgm->restart_pending);

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
    aio_co_enter(tgm->aio_context, co);
}
442 
443 void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
444 {
445     int i;
446 
447     if (tgm->throttle_state) {
448         for (i = 0; i < 2; i++) {
449             QEMUTimer *t = tgm->throttle_timers.timers[i];
450             if (timer_pending(t)) {
451                 /* If there's a pending timer on this tgm, fire it now */
452                 timer_del(t);
453                 timer_cb(tgm, i);
454             } else {
455                 /* Else run the next request from the queue manually */
456                 throttle_group_restart_queue(tgm, i);
457             }
458         }
459     }
460 }
461 
/* Update the throttle configuration for a particular group. Similar
 * to throttle_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @tgm:    a ThrottleGroupMember that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_config(ts, tg->clock_type, cfg);
    qemu_mutex_unlock(&tg->lock);

    /* Re-evaluate queued requests against the new limits */
    throttle_group_restart_tgm(tgm);
}
479 
/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @tgm:    a ThrottleGroupMember that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}
495 
/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @tgm:       the ThrottleGroupMember whose request had been throttled
 * @is_write:  the type of operation (read/write)
 */
static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just been fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(tgm, is_write);
}
515 
/* QEMUTimer callback for the read timer; @opaque is the ThrottleGroupMember */
static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}
520 
/* QEMUTimer callback for the write timer; @opaque is the ThrottleGroupMember */
static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}
525 
/* Register a ThrottleGroupMember from the throttling group, also initializing
 * its timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * This function edits throttle_groups and must be called under the global
 * mutex.
 *
 * @tgm:       the ThrottleGroupMember to insert
 * @groupname: the name of the group
 * @ctx:       the AioContext to use
 */
void throttle_group_register_tgm(ThrottleGroupMember *tgm,
                                 const char *groupname,
                                 AioContext *ctx)
{
    int i;
    /* Takes a reference on the group, creating it on demand */
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    tgm->throttle_state = ts;
    tgm->aio_context = ctx;
    atomic_set(&tgm->restart_pending, 0);

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = tgm;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);

    throttle_timers_init(&tgm->throttle_timers,
                         tgm->aio_context,
                         tg->clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         tgm);
    qemu_co_mutex_init(&tgm->throttled_reqs_lock);
    qemu_co_queue_init(&tgm->throttled_reqs[0]);
    qemu_co_queue_init(&tgm->throttled_reqs[1]);

    qemu_mutex_unlock(&tg->lock);
}
571 
/* Unregister a ThrottleGroupMember from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The ThrottleGroupMember must not have pending throttled requests, so the
 * caller has to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @tgm the ThrottleGroupMember to remove
 */
void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token;
    int i;

    if (!ts) {
        /* Discard already unregistered tgm */
        return;
    }

    /* Wait for throttle_group_restart_queue_entry() coroutines to finish */
    AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0);

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        /* The caller must have drained this member: no queued requests
         * and no armed timers may remain */
        assert(tgm->pending_reqs[i] == 0);
        assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
        assert(!timer_pending(tgm->throttle_timers.timers[i]));
        if (tg->tokens[i] == tgm) {
            /* Hand the round-robin token over to another member */
            token = throttle_group_next_tgm(tgm);
            /* Take care of the case where this is the last tgm in the group */
            if (token == tgm) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current tgm from the list */
    QLIST_REMOVE(tgm, round_robin);
    throttle_timers_destroy(&tgm->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    tgm->throttle_state = NULL;
}
620 
/* Move a ThrottleGroupMember's throttle timers to a new AioContext.
 *
 * @tgm:          the ThrottleGroupMember
 * @new_context:  the AioContext that will run its timers from now on
 */
void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
                                       AioContext *new_context)
{
    ThrottleTimers *tt = &tgm->throttle_timers;
    throttle_timers_attach_aio_context(tt, new_context);
    tgm->aio_context = new_context;
}
628 
/* Detach a ThrottleGroupMember's throttle timers from its AioContext.
 *
 * The caller must have drained the member's throttled requests first.
 * If one of the member's timers is armed, scheduling duty is handed
 * over to another group member before detaching.
 *
 * @tgm: the ThrottleGroupMember
 */
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    int i;

    /* Requests must have been drained */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    /* Kick off next ThrottleGroupMember, if necessary */
    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (timer_pending(tt->timers[i])) {
            /* This member's timer is going away: clear the flag and let
             * schedule_next_request() re-arm a timer on another member */
            tg->any_timer_armed[i] = false;
            schedule_next_request(tgm, i);
        }
    }
    qemu_mutex_unlock(&tg->lock);

    throttle_timers_detach_aio_context(tt);
    tgm->aio_context = NULL;
}
653 
/* Prefix prepended to the throttle option names when they are exposed
 * as QOM properties */
#undef THROTTLE_OPT_PREFIX
#define THROTTLE_OPT_PREFIX "x-"
656 
/* Helper struct and array for QOM property setter/getter */
typedef struct {
    const char *name; /* QOM property name (includes THROTTLE_OPT_PREFIX) */
    BucketType type;  /* which throttle bucket the property refers to */
    enum {
        AVG,          /* the bucket's average rate */
        MAX,          /* the bucket's burst rate */
        BURST_LENGTH, /* the bucket's burst length */
        IOPS_SIZE,    /* ThrottleConfig.op_size; 'type' is unused here */
    } category;
} ThrottleParamInfo;
668 
/* One entry per QOM throttle property, mapping the property name to the
 * bucket and field that the setter/getter must access */
static ThrottleParamInfo properties[] = {
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
        THROTTLE_OPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
        THROTTLE_OPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
        THROTTLE_OPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
        THROTTLE_OPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
        THROTTLE_OPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
        THROTTLE_OPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
        THROTTLE_OPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
        THROTTLE_OPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
        THROTTLE_OPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
        THROTTLE_BPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
        THROTTLE_BPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
        THROTTLE_BPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
        THROTTLE_BPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
        THROTTLE_BPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
        THROTTLE_BPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
        THROTTLE_BPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
        THROTTLE_BPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
        THROTTLE_BPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
        0, IOPS_SIZE,
    }
};
747 
748 /* This function edits throttle_groups and must be called under the global
749  * mutex */
750 static void throttle_group_obj_init(Object *obj)
751 {
752     ThrottleGroup *tg = THROTTLE_GROUP(obj);
753 
754     tg->clock_type = QEMU_CLOCK_REALTIME;
755     if (qtest_enabled()) {
756         /* For testing block IO throttling only */
757         tg->clock_type = QEMU_CLOCK_VIRTUAL;
758     }
759     tg->is_initialized = false;
760     qemu_mutex_init(&tg->lock);
761     throttle_init(&tg->ts);
762     QLIST_INIT(&tg->head);
763 }
764 
/* UserCreatable completion hook: pick a name, validate the configured
 * limits and insert the group into throttle_groups.
 *
 * This function edits throttle_groups and must be called under the global
 * mutex */
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;

    /* set group name to object id if it exists */
    if (!tg->name && tg->parent_obj.parent) {
        tg->name = object_get_canonical_path_component(OBJECT(obj));
    }
    /* We must have a group name at this point */
    assert(tg->name);

    /* error if name is duplicate */
    if (throttle_group_exists(tg->name)) {
        error_setg(errp, "A group with this name already exists");
        return;
    }

    /* check validity */
    throttle_get_config(&tg->ts, &cfg);
    if (!throttle_is_valid(&cfg, errp)) {
        return;
    }
    /* Apply the validated configuration */
    throttle_config(&tg->ts, tg->clock_type, &cfg);
    QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    tg->is_initialized = true;
}
794 
/* QOM instance_finalize: undo throttle_group_obj_init() and, for fully
 * initialized groups, throttle_group_obj_complete().
 *
 * This function edits throttle_groups and must be called under the global
 * mutex */
static void throttle_group_obj_finalize(Object *obj)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    if (tg->is_initialized) {
        /* Only completed groups were inserted into throttle_groups */
        QTAILQ_REMOVE(&throttle_groups, tg, list);
    }
    qemu_mutex_destroy(&tg->lock);
    g_free(tg->name);
}
806 
807 static void throttle_group_set(Object *obj, Visitor *v, const char * name,
808                                void *opaque, Error **errp)
809 
810 {
811     ThrottleGroup *tg = THROTTLE_GROUP(obj);
812     ThrottleConfig *cfg;
813     ThrottleParamInfo *info = opaque;
814     Error *local_err = NULL;
815     int64_t value;
816 
817     /* If we have finished initialization, don't accept individual property
818      * changes through QOM. Throttle configuration limits must be set in one
819      * transaction, as certain combinations are invalid.
820      */
821     if (tg->is_initialized) {
822         error_setg(&local_err, "Property cannot be set after initialization");
823         goto ret;
824     }
825 
826     visit_type_int64(v, name, &value, &local_err);
827     if (local_err) {
828         goto ret;
829     }
830     if (value < 0) {
831         error_setg(&local_err, "Property values cannot be negative");
832         goto ret;
833     }
834 
835     cfg = &tg->ts.cfg;
836     switch (info->category) {
837     case AVG:
838         cfg->buckets[info->type].avg = value;
839         break;
840     case MAX:
841         cfg->buckets[info->type].max = value;
842         break;
843     case BURST_LENGTH:
844         if (value > UINT_MAX) {
845             error_setg(&local_err, "%s value must be in the"
846                        "range [0, %u]", info->name, UINT_MAX);
847             goto ret;
848         }
849         cfg->buckets[info->type].burst_length = value;
850         break;
851     case IOPS_SIZE:
852         cfg->op_size = value;
853         break;
854     }
855 
856 ret:
857     error_propagate(errp, local_err);
858     return;
859 
860 }
861 
/* QOM property getter for the individual throttle limit properties.
 *
 * @obj:    the ThrottleGroup
 * @v:      the visitor to fill with the current value
 * @name:   the property name
 * @opaque: the ThrottleParamInfo describing which limit to read
 * @errp:   error pointer
 */
static void throttle_group_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleParamInfo *info = opaque;
    int64_t value;

    throttle_get_config(&tg->ts, &cfg);
    /* All enum values are covered, so 'value' is always assigned */
    switch (info->category) {
    case AVG:
        value = cfg.buckets[info->type].avg;
        break;
    case MAX:
        value = cfg.buckets[info->type].max;
        break;
    case BURST_LENGTH:
        value = cfg.buckets[info->type].burst_length;
        break;
    case IOPS_SIZE:
        value = cfg.op_size;
        break;
    }

    visit_type_int64(v, name, &value, errp);
}
888 
889 static void throttle_group_set_limits(Object *obj, Visitor *v,
890                                       const char *name, void *opaque,
891                                       Error **errp)
892 
893 {
894     ThrottleGroup *tg = THROTTLE_GROUP(obj);
895     ThrottleConfig cfg;
896     ThrottleLimits *argp;
897     Error *local_err = NULL;
898 
899     visit_type_ThrottleLimits(v, name, &argp, &local_err);
900     if (local_err) {
901         goto ret;
902     }
903     qemu_mutex_lock(&tg->lock);
904     throttle_get_config(&tg->ts, &cfg);
905     throttle_limits_to_config(argp, &cfg, &local_err);
906     if (local_err) {
907         goto unlock;
908     }
909     throttle_config(&tg->ts, tg->clock_type, &cfg);
910 
911 unlock:
912     qemu_mutex_unlock(&tg->lock);
913 ret:
914     qapi_free_ThrottleLimits(argp);
915     error_propagate(errp, local_err);
916     return;
917 }
918 
919 static void throttle_group_get_limits(Object *obj, Visitor *v,
920                                       const char *name, void *opaque,
921                                       Error **errp)
922 {
923     ThrottleGroup *tg = THROTTLE_GROUP(obj);
924     ThrottleConfig cfg;
925     ThrottleLimits arg = { 0 };
926     ThrottleLimits *argp = &arg;
927 
928     qemu_mutex_lock(&tg->lock);
929     throttle_get_config(&tg->ts, &cfg);
930     qemu_mutex_unlock(&tg->lock);
931 
932     throttle_config_to_limits(&cfg, argp);
933 
934     visit_type_ThrottleLimits(v, name, &argp, errp);
935 }
936 
/* A group can be deleted only when nothing else holds a reference to it,
 * i.e. the QOM refcount is down to the single creator reference */
static bool throttle_group_can_be_deleted(UserCreatable *uc)
{
    return OBJECT(uc)->ref == 1;
}
941 
942 static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
943 {
944     size_t i = 0;
945     UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
946 
947     ucc->complete = throttle_group_obj_complete;
948     ucc->can_be_deleted = throttle_group_can_be_deleted;
949 
950     /* individual properties */
951     for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
952         object_class_property_add(klass,
953                                   properties[i].name,
954                                   "int",
955                                   throttle_group_get,
956                                   throttle_group_set,
957                                   NULL, &properties[i]);
958     }
959 
960     /* ThrottleLimits */
961     object_class_property_add(klass,
962                               "limits", "ThrottleLimits",
963                               throttle_group_get_limits,
964                               throttle_group_set_limits,
965                               NULL, NULL);
966 }
967 
/* QOM type description for throttle groups (user-creatable objects) */
static const TypeInfo throttle_group_info = {
    .name = TYPE_THROTTLE_GROUP,
    .parent = TYPE_OBJECT,
    .class_init = throttle_group_obj_class_init,
    .instance_size = sizeof(ThrottleGroup),
    .instance_init = throttle_group_obj_init,
    .instance_finalize = throttle_group_obj_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};
980 
/* Register the throttle group QOM type at program startup */
static void throttle_groups_init(void)
{
    type_register_static(&throttle_group_info);
}

type_init(throttle_groups_init);
987