xref: /openbmc/qemu/util/async.c (revision a6caeee8)
/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

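/*
 * Example: typical lifecycle of a recurring bottom half.  This is a minimal
 * sketch; my_cb, MyState and s are illustrative names, not part of this file,
 * and aio_bh_new() is the convenience wrapper declared in block/aio.h:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // deferred work, runs in the home thread of the BH's AioContext
 *     }
 *
 *     s->bh = aio_bh_new(ctx, my_cb, s);   // created, not yet scheduled
 *     qemu_bh_schedule(s->bh);             // BH_SCHEDULED: runs on next poll
 *     ...
 *     qemu_bh_delete(s->bh);               // BH_DELETED: freed by aio_bh_poll()
 */
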
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This is needed to avoid guest timeouts caused
     * by overly long execution cycles.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_fetch_and is paired with aio_bh_enqueue().  The implicit
     * memory barrier ensures that the callback sees all writes done by the
     * scheduling thread.  It also ensures that the scheduling thread sees the
     * cleared flag before bh->cb has run, and thus will call aio_notify again
     * if necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

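/*
 * Example: scheduling a one-shot callback.  A minimal sketch, where
 * complete_request and req are illustrative; aio_bh_schedule_oneshot() is the
 * wrapper around aio_bh_schedule_oneshot_full() declared in block/aio.h.  The
 * QEMUBH is allocated here and freed automatically once the callback has run:
 *
 *     aio_bh_schedule_oneshot(ctx, complete_request, req);
 *
 * There is no handle to cancel; once scheduled, the callback runs exactly once
 * in ctx's event loop.
 */
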
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent calls to aio_bh_poll() on the same AioContext are not allowed. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

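/*
 * aio_bh_poll() is not called directly by users; it runs as part of
 * aio_poll()/aio_dispatch().  A dedicated event loop thread would drive it
 * roughly like this (a simplified sketch, not the actual iothread code;
 * "stopping" is illustrative):
 *
 *     while (!stopping) {
 *         aio_poll(ctx, true);   // blocks, then dispatches BHs, fds, timers
 *     }
 */
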
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: it only clears BH_SCHEDULED and cannot stop
 * a callback that is already running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous.  The bottom half is actually freed later, by
 * aio_bh_poll() or aio_ctx_finalize().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

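/*
 * Example: qemu_bh_cancel() vs. qemu_bh_delete() (sketch; s->bh is
 * illustrative).  Cancelling only clears BH_SCHEDULED, so the QEMUBH can be
 * reused; deleting marks it BH_DELETED and the event loop reclaims the memory:
 *
 *     qemu_bh_cancel(s->bh);     // skip the pending run, keep the BH
 *     qemu_bh_schedule(s->bh);   // may be scheduled again later
 *
 *     qemu_bh_delete(s->bh);     // done with it for good
 *     s->bh = NULL;
 */
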
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

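/*
 * Worked example (values are illustrative): with no scheduled bottom halves
 * and a timer due in 3 ms, aio_compute_timeout() returns 3000000 ns.  If an
 * idle BH is also pending, the BH scan proposes 10 ms but the 3 ms timer still
 * wins via qemu_soonest_timeout().  Any non-idle scheduled BH forces an
 * immediate return of 0.
 */
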
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}

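/*
 * aio_notify() and aio_notify_accept() implement the event loop's wakeup
 * protocol.  Condensed sketch of the pairing between aio_ctx_prepare()/
 * aio_ctx_check() (or aio_poll()) and aio_bh_enqueue()/aio_notify(); this is
 * not new API, just the code above laid out side by side:
 *
 *     waiter (event loop)                notifier (any thread)
 *     -------------------                ---------------------
 *     notify_me |= 1;                    bh->flags |= BH_SCHEDULED;
 *     smp_mb();                          smp_wmb();
 *     compute timeout, check BHs;        notified = true;
 *     poll();                            smp_mb();
 *     notify_me &= ~1;                   if (notify_me) {
 *     aio_notify_accept();                   event_notifier_set(&notifier);
 *                                        }
 *
 * The barriers guarantee that either the waiter sees the scheduled BH before
 * going to sleep, or the notifier sees notify_me and kicks the event notifier.
 */
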
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

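/*
 * Example: creating a context and hooking it into a GLib main loop.  A sketch;
 * my_main_context is illustrative and &error_abort stands in for real error
 * handling.  aio_get_g_source() returns a new reference, so it is dropped
 * after attaching:
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     GSource *src = aio_get_g_source(ctx);
 *
 *     g_source_attach(src, my_main_context);
 *     g_source_unref(src);
 *
 * Alternatively, a dedicated thread can drive the context directly with
 * aio_poll(); see the sketch near qemu_set_current_aio_context() below.
 */
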
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

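/*
 * Example: starting a coroutine in another thread's event loop.  A sketch;
 * my_co_entry and data are illustrative.  The entry point runs in the thread
 * that owns ctx:
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, data);
 *     aio_co_schedule(ctx, co);
 *
 * Scheduling the same coroutine again before it has run is a bug and aborts,
 * as enforced by the co->scheduled check above.
 */
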
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

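/*
 * Example: temporarily moving a coroutine to another context and back, from
 * coroutine context only.  A sketch; iothread_ctx is illustrative:
 *
 *     AioContext *home_ctx = qemu_get_current_aio_context();
 *
 *     aio_co_reschedule_self(iothread_ctx);
 *     // ... the coroutine now runs in iothread_ctx's thread ...
 *     aio_co_reschedule_self(home_ctx);
 */
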
void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

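/*
 * Example: waking a coroutine from a completion callback.  A sketch; req and
 * submit_request() are illustrative.  The coroutine yields after submitting
 * work and is re-entered in its own context when the completion fires,
 * possibly from a different thread:
 *
 *     // coroutine side
 *     req->co = qemu_coroutine_self();
 *     submit_request(req);
 *     qemu_coroutine_yield();          // resumed by aio_co_wake()
 *
 *     // completion side (any thread)
 *     aio_co_wake(req->co);
 */
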
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

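/*
 * Example: a dedicated event loop thread registers its context once at
 * startup, so that code running there (e.g. aio_co_enter()) recognizes it as
 * the current context.  A sketch; the thread function and "stopping" flag are
 * illustrative, not the actual iothread implementation:
 *
 *     static void *my_loop_thread(void *opaque)
 *     {
 *         AioContext *ctx = opaque;
 *
 *         qemu_set_current_aio_context(ctx);
 *         while (!stopping) {
 *             aio_poll(ctx, true);
 *         }
 *         return NULL;
 *     }
 */
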
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{

    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}

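/*
 * Example call (sketch; the bounds and error handling are illustrative, and
 * error_report_err() lives in qemu/error-report.h):
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_thread_pool_params(ctx, 1, 8, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */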