/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Deferred calls
 *
 * Copyright Red Hat.
 *
 * This API defers a function call within a defer_call_begin()/defer_call_end()
 * section, allowing multiple calls to batch up. This is a performance
 * optimization that is used in the block layer to submit several I/O requests
 * at once instead of individually:
 *
 *   defer_call_begin(); <-- start of section
 *   ...
 *   defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
 *   defer_call(my_func, my_obj); <-- another
 *   defer_call(my_func, my_obj); <-- another
 *   ...
 *   defer_call_end(); <-- end of section, my_func(my_obj) is called once
 */

#include "qemu/osdep.h"
#include "qemu/coroutine-tls.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "qemu/defer-call.h"

/* A function call that has been deferred until defer_call_end() */
typedef struct {
    void (*fn)(void *);
    void *opaque;
} DeferredCall;

/* Per-thread state */
typedef struct {
    unsigned nesting_level;
    GArray *deferred_call_array;
} DeferCallThreadState;

/* Use get_ptr_defer_call_thread_state() to fetch this thread-local value */
QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);

/* Called at thread cleanup time */
static void defer_call_atexit(Notifier *n, void *value)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
    g_array_free(thread_state->deferred_call_array, TRUE);
}

/* This won't involve coroutines, so use __thread */
static __thread Notifier defer_call_atexit_notifier;

/**
 * defer_call:
 * @fn: a function pointer to be invoked
 * @opaque: a user-defined argument to @fn()
 *
 * Call @fn(@opaque) immediately if not within a
 * defer_call_begin()/defer_call_end() section.
 *
 * Otherwise defer the call until the end of the outermost
 * defer_call_begin()/defer_call_end() section in this thread. If the same
 * @fn/@opaque pair has already been deferred, it will only be called once upon
 * defer_call_end() so that accumulated calls are batched into a single call.
 *
 * The caller must ensure that @opaque is not freed before @fn() is invoked.
 */
void defer_call(void (*fn)(void *), void *opaque)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    /* Call immediately if we're not deferring calls */
    if (thread_state->nesting_level == 0) {
        fn(opaque);
        return;
    }

    GArray *array = thread_state->deferred_call_array;
    if (!array) {
        array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
        thread_state->deferred_call_array = array;
        defer_call_atexit_notifier.notify = defer_call_atexit;
        qemu_thread_atexit_add(&defer_call_atexit_notifier);
    }

    DeferredCall *fns = (DeferredCall *)array->data;
    DeferredCall new_fn = {
        .fn = fn,
        .opaque = opaque,
    };

    /*
     * There won't be many, so do a linear search. If this becomes a bottleneck
     * then a binary search (glib 2.62+) or different data structure could be
     * used.
     */
    for (guint i = 0; i < array->len; i++) {
        if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
            return; /* already exists */
        }
    }

    g_array_append_val(array, new_fn);
}

/**
 * defer_call_begin: Defer defer_call() functions until defer_call_end()
 *
 * defer_call_begin() and defer_call_end() are thread-local operations. The
 * caller must ensure that each defer_call_begin() has a matching
 * defer_call_end() in the same thread.
 *
 * Nesting is supported. defer_call() functions are only called at the
 * outermost defer_call_end().
 */
void defer_call_begin(void)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    assert(thread_state->nesting_level < UINT32_MAX);

    thread_state->nesting_level++;
}

/**
 * defer_call_end: Run any pending defer_call() functions
 *
 * There must have been a matching defer_call_begin() call in the same thread
 * prior to this defer_call_end() call.
 */
void defer_call_end(void)
{
    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();

    assert(thread_state->nesting_level > 0);

    if (--thread_state->nesting_level > 0) {
        return;
    }

    GArray *array = thread_state->deferred_call_array;
    if (!array) {
        return;
    }

    DeferredCall *fns = (DeferredCall *)array->data;

    for (guint i = 0; i < array->len; i++) {
        fns[i].fn(fns[i].opaque);
    }

    /*
     * This resets the array without freeing memory so that appending is cheap
     * in the future.
     */
    g_array_set_size(array, 0);
}
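
/*
 * Example (an illustrative sketch only, not part of this file): a caller can
 * batch an expensive per-device flush across several queued requests. All
 * names below (MyDevice, Request, my_device_queue(), my_device_flush()) are
 * hypothetical.
 *
 *   static void my_device_flush(void *opaque)
 *   {
 *       MyDevice *dev = opaque;
 *
 *       ... submit everything queued on dev with a single doorbell write ...
 *   }
 *
 *   void my_device_submit(MyDevice *dev, Request *reqs, size_t n)
 *   {
 *       defer_call_begin();
 *       for (size_t i = 0; i < n; i++) {
 *           my_device_queue(dev, &reqs[i]);
 *           defer_call(my_device_flush, dev); <-- same fn/opaque pair, so it
 *                                                 is only recorded once
 *       }
 *       defer_call_end(); <-- my_device_flush(dev) runs once here
 *   }
 *
 * Nesting also works in this sketch: if my_device_submit() were itself called
 * inside an outer defer_call_begin()/defer_call_end() pair, the flush would be
 * deferred until the outermost defer_call_end() in the calling thread.
 */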