/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Deferred calls
 *
 * Copyright Red Hat.
 *
 * This API defers a function call within a defer_call_begin()/defer_call_end()
 * section, allowing multiple calls to batch up. This is a performance
 * optimization that is used in the block layer to submit several I/O requests
 * at once instead of individually:
 *
 *   defer_call_begin(); <-- start of section
 *   ...
 *   defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
 *   defer_call(my_func, my_obj); <-- another
 *   defer_call(my_func, my_obj); <-- another
 *   ...
 *   defer_call_end(); <-- end of section, my_func(my_obj) is called once
 */

#include "qemu/osdep.h"
#include "qemu/coroutine-tls.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "qemu/defer-call.h"

27*433fcea4SStefan Hajnoczi /* A function call that has been deferred until defer_call_end() */
28*433fcea4SStefan Hajnoczi typedef struct {
29*433fcea4SStefan Hajnoczi     void (*fn)(void *);
30*433fcea4SStefan Hajnoczi     void *opaque;
31*433fcea4SStefan Hajnoczi } DeferredCall;
32*433fcea4SStefan Hajnoczi 
33*433fcea4SStefan Hajnoczi /* Per-thread state */
34*433fcea4SStefan Hajnoczi typedef struct {
35*433fcea4SStefan Hajnoczi     unsigned nesting_level;
36*433fcea4SStefan Hajnoczi     GArray *deferred_call_array;
37*433fcea4SStefan Hajnoczi } DeferCallThreadState;
38*433fcea4SStefan Hajnoczi 
39*433fcea4SStefan Hajnoczi /* Use get_ptr_defer_call_thread_state() to fetch this thread-local value */
40*433fcea4SStefan Hajnoczi QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);
41*433fcea4SStefan Hajnoczi 
42*433fcea4SStefan Hajnoczi /* Called at thread cleanup time */
defer_call_atexit(Notifier * n,void * value)43*433fcea4SStefan Hajnoczi static void defer_call_atexit(Notifier *n, void *value)
44*433fcea4SStefan Hajnoczi {
45*433fcea4SStefan Hajnoczi     DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
46*433fcea4SStefan Hajnoczi     g_array_free(thread_state->deferred_call_array, TRUE);
47*433fcea4SStefan Hajnoczi }
48*433fcea4SStefan Hajnoczi 
49*433fcea4SStefan Hajnoczi /* This won't involve coroutines, so use __thread */
50*433fcea4SStefan Hajnoczi static __thread Notifier defer_call_atexit_notifier;
51*433fcea4SStefan Hajnoczi 
52*433fcea4SStefan Hajnoczi /**
53*433fcea4SStefan Hajnoczi  * defer_call:
54*433fcea4SStefan Hajnoczi  * @fn: a function pointer to be invoked
55*433fcea4SStefan Hajnoczi  * @opaque: a user-defined argument to @fn()
56*433fcea4SStefan Hajnoczi  *
57*433fcea4SStefan Hajnoczi  * Call @fn(@opaque) immediately if not within a
58*433fcea4SStefan Hajnoczi  * defer_call_begin()/defer_call_end() section.
59*433fcea4SStefan Hajnoczi  *
60*433fcea4SStefan Hajnoczi  * Otherwise defer the call until the end of the outermost
61*433fcea4SStefan Hajnoczi  * defer_call_begin()/defer_call_end() section in this thread. If the same
62*433fcea4SStefan Hajnoczi  * @fn/@opaque pair has already been deferred, it will only be called once upon
63*433fcea4SStefan Hajnoczi  * defer_call_end() so that accumulated calls are batched into a single call.
64*433fcea4SStefan Hajnoczi  *
65*433fcea4SStefan Hajnoczi  * The caller must ensure that @opaque is not freed before @fn() is invoked.
66*433fcea4SStefan Hajnoczi  */
defer_call(void (* fn)(void *),void * opaque)67*433fcea4SStefan Hajnoczi void defer_call(void (*fn)(void *), void *opaque)
68*433fcea4SStefan Hajnoczi {
69*433fcea4SStefan Hajnoczi     DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
70*433fcea4SStefan Hajnoczi 
71*433fcea4SStefan Hajnoczi     /* Call immediately if we're not deferring calls */
72*433fcea4SStefan Hajnoczi     if (thread_state->nesting_level == 0) {
73*433fcea4SStefan Hajnoczi         fn(opaque);
74*433fcea4SStefan Hajnoczi         return;
75*433fcea4SStefan Hajnoczi     }
76*433fcea4SStefan Hajnoczi 
77*433fcea4SStefan Hajnoczi     GArray *array = thread_state->deferred_call_array;
78*433fcea4SStefan Hajnoczi     if (!array) {
79*433fcea4SStefan Hajnoczi         array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
80*433fcea4SStefan Hajnoczi         thread_state->deferred_call_array = array;
81*433fcea4SStefan Hajnoczi         defer_call_atexit_notifier.notify = defer_call_atexit;
82*433fcea4SStefan Hajnoczi         qemu_thread_atexit_add(&defer_call_atexit_notifier);
83*433fcea4SStefan Hajnoczi     }
84*433fcea4SStefan Hajnoczi 
85*433fcea4SStefan Hajnoczi     DeferredCall *fns = (DeferredCall *)array->data;
86*433fcea4SStefan Hajnoczi     DeferredCall new_fn = {
87*433fcea4SStefan Hajnoczi         .fn = fn,
88*433fcea4SStefan Hajnoczi         .opaque = opaque,
89*433fcea4SStefan Hajnoczi     };
90*433fcea4SStefan Hajnoczi 
91*433fcea4SStefan Hajnoczi     /*
92*433fcea4SStefan Hajnoczi      * There won't be many, so do a linear search. If this becomes a bottleneck
93*433fcea4SStefan Hajnoczi      * then a binary search (glib 2.62+) or different data structure could be
94*433fcea4SStefan Hajnoczi      * used.
95*433fcea4SStefan Hajnoczi      */
96*433fcea4SStefan Hajnoczi     for (guint i = 0; i < array->len; i++) {
97*433fcea4SStefan Hajnoczi         if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
98*433fcea4SStefan Hajnoczi             return; /* already exists */
99*433fcea4SStefan Hajnoczi         }
100*433fcea4SStefan Hajnoczi     }
101*433fcea4SStefan Hajnoczi 
102*433fcea4SStefan Hajnoczi     g_array_append_val(array, new_fn);
103*433fcea4SStefan Hajnoczi }
104*433fcea4SStefan Hajnoczi 
105*433fcea4SStefan Hajnoczi /**
106*433fcea4SStefan Hajnoczi  * defer_call_begin: Defer defer_call() functions until defer_call_end()
107*433fcea4SStefan Hajnoczi  *
108*433fcea4SStefan Hajnoczi  * defer_call_begin() and defer_call_end() are thread-local operations. The
109*433fcea4SStefan Hajnoczi  * caller must ensure that each defer_call_begin() has a matching
110*433fcea4SStefan Hajnoczi  * defer_call_end() in the same thread.
111*433fcea4SStefan Hajnoczi  *
112*433fcea4SStefan Hajnoczi  * Nesting is supported. defer_call() functions are only called at the
113*433fcea4SStefan Hajnoczi  * outermost defer_call_end().
114*433fcea4SStefan Hajnoczi  */
defer_call_begin(void)115*433fcea4SStefan Hajnoczi void defer_call_begin(void)
116*433fcea4SStefan Hajnoczi {
117*433fcea4SStefan Hajnoczi     DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
118*433fcea4SStefan Hajnoczi 
119*433fcea4SStefan Hajnoczi     assert(thread_state->nesting_level < UINT32_MAX);
120*433fcea4SStefan Hajnoczi 
121*433fcea4SStefan Hajnoczi     thread_state->nesting_level++;
122*433fcea4SStefan Hajnoczi }
123*433fcea4SStefan Hajnoczi 
124*433fcea4SStefan Hajnoczi /**
125*433fcea4SStefan Hajnoczi  * defer_call_end: Run any pending defer_call() functions
126*433fcea4SStefan Hajnoczi  *
127*433fcea4SStefan Hajnoczi  * There must have been a matching defer_call_begin() call in the same thread
128*433fcea4SStefan Hajnoczi  * prior to this defer_call_end() call.
129*433fcea4SStefan Hajnoczi  */
defer_call_end(void)130*433fcea4SStefan Hajnoczi void defer_call_end(void)
131*433fcea4SStefan Hajnoczi {
132*433fcea4SStefan Hajnoczi     DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
133*433fcea4SStefan Hajnoczi 
134*433fcea4SStefan Hajnoczi     assert(thread_state->nesting_level > 0);
135*433fcea4SStefan Hajnoczi 
136*433fcea4SStefan Hajnoczi     if (--thread_state->nesting_level > 0) {
137*433fcea4SStefan Hajnoczi         return;
138*433fcea4SStefan Hajnoczi     }
139*433fcea4SStefan Hajnoczi 
140*433fcea4SStefan Hajnoczi     GArray *array = thread_state->deferred_call_array;
141*433fcea4SStefan Hajnoczi     if (!array) {
142*433fcea4SStefan Hajnoczi         return;
143*433fcea4SStefan Hajnoczi     }
144*433fcea4SStefan Hajnoczi 
145*433fcea4SStefan Hajnoczi     DeferredCall *fns = (DeferredCall *)array->data;
146*433fcea4SStefan Hajnoczi 
147*433fcea4SStefan Hajnoczi     for (guint i = 0; i < array->len; i++) {
148*433fcea4SStefan Hajnoczi         fns[i].fn(fns[i].opaque);
149*433fcea4SStefan Hajnoczi     }
150*433fcea4SStefan Hajnoczi 
151*433fcea4SStefan Hajnoczi     /*
152*433fcea4SStefan Hajnoczi      * This resets the array without freeing memory so that appending is cheap
153*433fcea4SStefan Hajnoczi      * in the future.
154*433fcea4SStefan Hajnoczi      */
155*433fcea4SStefan Hajnoczi     g_array_set_size(array, 0);
156*433fcea4SStefan Hajnoczi }
157