/*
 * QEMU coroutines
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *  Kevin Wolf         <kwolf@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "block/aio.h"

enum {
    POOL_BATCH_SIZE = 64,
};

/** Free list to speed up creation */
static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
static unsigned int release_pool_size;
static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool);
static __thread unsigned int alloc_pool_size;
static __thread Notifier coroutine_pool_cleanup_notifier;

static void coroutine_pool_cleanup(Notifier *n, void *value)
{
    Coroutine *co;
    Coroutine *tmp;

    QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) {
        QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
        qemu_coroutine_delete(co);
    }
}

Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
{
    Coroutine *co = NULL;

    if (CONFIG_COROUTINE_POOL) {
        co = QSLIST_FIRST(&alloc_pool);
        if (!co) {
            if (release_pool_size > POOL_BATCH_SIZE) {
                /* Slow path; a good place to register the destructor, too.  */
                if (!coroutine_pool_cleanup_notifier.notify) {
                    coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
                    qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier);
                }

                /* This is not exact; there could be a little skew between
                 * release_pool_size and the actual size of release_pool.  But
                 * it is just a heuristic, it does not need to be perfect.
                 */
                alloc_pool_size = atomic_xchg(&release_pool_size, 0);
                QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
                co = QSLIST_FIRST(&alloc_pool);
            }
        }
        if (co) {
            QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
            alloc_pool_size--;
        }
    }

    if (!co) {
        co = qemu_coroutine_new();
    }

    co->entry = entry;
    co->entry_arg = opaque;
    QSIMPLEQ_INIT(&co->co_queue_wakeup);
    return co;
}

static void coroutine_delete(Coroutine *co)
{
    co->caller = NULL;

    if (CONFIG_COROUTINE_POOL) {
        if (release_pool_size < POOL_BATCH_SIZE * 2) {
            QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
            atomic_inc(&release_pool_size);
            return;
        }
        if (alloc_pool_size < POOL_BATCH_SIZE) {
            QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
            alloc_pool_size++;
            return;
        }
    }

    qemu_coroutine_delete(co);
}

void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
{
    QSIMPLEQ_HEAD(, Coroutine) pending = QSIMPLEQ_HEAD_INITIALIZER(pending);
    Coroutine *from = qemu_coroutine_self();

    QSIMPLEQ_INSERT_TAIL(&pending, co, co_queue_next);

    /* Run co and any queued coroutines */
    while (!QSIMPLEQ_EMPTY(&pending)) {
        Coroutine *to = QSIMPLEQ_FIRST(&pending);
        CoroutineAction ret;

        /* Cannot rely on the read barrier for to in aio_co_wake(), as there
         * are callers outside of aio_co_wake() */
        const char *scheduled = atomic_mb_read(&to->scheduled);

        QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next);

        trace_qemu_aio_coroutine_enter(ctx, from, to, to->entry_arg);

        /* if the Coroutine has already been scheduled, entering it again will
         * cause us to enter it twice, potentially even after the coroutine has
         * been deleted */
        if (scheduled) {
            fprintf(stderr,
                    "%s: Co-routine was already scheduled in '%s'\n",
                    __func__, scheduled);
            abort();
        }

        if (to->caller) {
            fprintf(stderr, "Co-routine re-entered recursively\n");
            abort();
        }

        to->caller = from;
        to->ctx = ctx;

        /* Store to->ctx before anything that stores to.  Matches
         * barrier in aio_co_wake and qemu_co_mutex_wake.
         */
        smp_wmb();

        ret = qemu_coroutine_switch(from, to, COROUTINE_ENTER);

        /* Queued coroutines are run depth-first; previously pending coroutines
         * run after those queued more recently.
         */
        QSIMPLEQ_PREPEND(&pending, &to->co_queue_wakeup);

        switch (ret) {
        case COROUTINE_YIELD:
            break;
        case COROUTINE_TERMINATE:
            assert(!to->locks_held);
            trace_qemu_coroutine_terminate(to);
            coroutine_delete(to);
            break;
        default:
            abort();
        }
    }
}

void qemu_coroutine_enter(Coroutine *co)
{
    qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);
}

void qemu_coroutine_enter_if_inactive(Coroutine *co)
{
    if (!qemu_coroutine_entered(co)) {
        qemu_coroutine_enter(co);
    }
}

void coroutine_fn qemu_coroutine_yield(void)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *to = self->caller;

    trace_qemu_coroutine_yield(self, to);

    if (!to) {
        fprintf(stderr, "Co-routine is yielding to no one\n");
        abort();
    }

    self->caller = NULL;
    qemu_coroutine_switch(self, to, COROUTINE_YIELD);
}

bool qemu_coroutine_entered(Coroutine *co)
{
    return co->caller;
}

AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
{
    return co->ctx;
}
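
/*
 * Usage sketch (illustrative only, not part of this file's API): a
 * coroutine entry function receives the opaque pointer given to
 * qemu_coroutine_create() and can suspend itself with
 * qemu_coroutine_yield(); each qemu_coroutine_enter() then resumes it
 * where it left off.  The names example_entry and example_caller below
 * are hypothetical.
 *
 *     static void coroutine_fn example_entry(void *opaque)
 *     {
 *         int *step = opaque;
 *
 *         *step = 1;
 *         qemu_coroutine_yield();    // control returns to example_caller()
 *         *step = 2;                 // resumed by the second enter
 *     }
 *
 *     static void example_caller(void)
 *     {
 *         int step = 0;
 *         Coroutine *co = qemu_coroutine_create(example_entry, &step);
 *
 *         qemu_coroutine_enter(co);  // runs until the yield; step == 1
 *         qemu_coroutine_enter(co);  // runs to termination; step == 2
 *     }
 *
 * After the second enter the coroutine terminates and is recycled into
 * the pool (or freed) by coroutine_delete(); entering it again would
 * abort.
 */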