/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here. Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue. This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
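/*
 * Illustrative sketch, not compiled here: a CoQueue is typically used
 * like a condition variable, paired with a lock that protects the
 * predicate. "lock", "queue" and "predicate" below are hypothetical.
 *
 *     qemu_co_mutex_lock(&lock);
 *     while (!predicate) {
 *         qemu_co_queue_wait(&queue, &lock);  (drops and re-takes lock)
 *     }
 *     ... use the protected state ...
 *     qemu_co_mutex_unlock(&lock);
 *
 * A waker makes the predicate true while holding the same lock, then
 * calls qemu_co_queue_next() or qemu_co_queue_restart_all().
 */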
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
{
    Coroutine *next;

    if (QSIMPLEQ_EMPTY(&queue->entries)) {
        return false;
    }

    while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
        aio_co_wake(next);
        if (single) {
            break;
        }
    }
    return true;
}

bool qemu_co_queue_next(CoQueue *queue)
{
    return qemu_co_queue_do_restart(queue, true);
}

void qemu_co_queue_restart_all(CoQueue *queue)
{
    qemu_co_queue_do_restart(queue, false);
}

bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
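/*
 * Illustrative sketch, with hypothetical names: qemu_co_enter_next() is
 * useful outside coroutine context, e.g. in a timer callback that
 * releases one queued request at a time.
 *
 *     static void timer_cb(void *opaque)
 *     {
 *         Throttled *t = opaque;                  (hypothetical type)
 *
 *         qemu_mutex_lock(&t->lock);
 *         qemu_co_enter_next(&t->queue, &t->lock);
 *         qemu_mutex_unlock(&t->lock);
 *     }
 *
 * The lock is dropped around aio_co_wake() inside the implementation,
 * so the woken coroutine can take it without deadlocking.
 */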
/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue. There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit. The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    w.co = self;
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = qatomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves! */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep. What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails. With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup. So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            /* If the owner runs in our own AioContext, spinning cannot
             * make progress; fall through to the slow path instead.
             */
            if (qatomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (qatomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = qatomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended. */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
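/*
 * Hand-off timeline sketch (one possible interleaving between the lock
 * and unlock sides; the sequence number 7 is arbitrary):
 *
 *     qemu_co_mutex_unlock()             qemu_co_mutex_lock() slow path
 *     ----------------------             ------------------------------
 *     fetch_dec: locked 2 -> 1
 *     pop_waiter() -> NULL               push_waiter(&w)
 *     handoff = 7
 *     has_waiters() -> true
 *                                        reads handoff == 7
 *                                        cmpxchg(&handoff, 7, 0) wins
 *     cmpxchg(&handoff, 7, 0) fails      pop_waiter() -> &w: got the lock
 *     break: the lock() side is now
 *     responsible for the wakeup
 */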
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock(). Phew, that was easy! */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue. Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        qatomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}

void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
    qemu_co_mutex_init(&lock->mutex);
}

void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line. */
    while (lock->pending_writer) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex. */
    self->locks_held++;
}

void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    if (!lock->reader) {
        /* The critical section started in qemu_co_rwlock_wrlock. */
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        self->locks_held--;

        qemu_co_mutex_lock(&lock->mutex);
        lock->reader--;
        assert(lock->reader >= 0);
        /* Wake up only one waiting writer. */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
    qemu_co_mutex_unlock(&lock->mutex);
}
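/*
 * Illustrative sketch of basic CoRwlock usage from coroutine context
 * ("rw" and the shared state are hypothetical):
 *
 *     Reader:                            Writer:
 *     qemu_co_rwlock_rdlock(&rw);        qemu_co_rwlock_wrlock(&rw);
 *     ... read shared state ...          ... modify shared state ...
 *     qemu_co_rwlock_unlock(&rw);        qemu_co_rwlock_unlock(&rw);
 *
 * Readers can run concurrently; a writer is exclusive and, thanks to
 * pending_writer, is not starved by a steady stream of new readers.
 */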
void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    /* lock->mutex critical section started in qemu_co_rwlock_wrlock or
     * qemu_co_rwlock_upgrade.
     */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex. */
    self->locks_held++;
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, so that lock->reader remains zero.
     * There is no need to update self->locks_held.
     */
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, similar to qemu_co_rwlock_wrlock. Do
     * not account for the lock twice in self->locks_held.
     */
    self->locks_held--;
}
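/*
 * Illustrative sketch, with hypothetical names: a reader that discovers
 * it must modify the shared state can upgrade, then downgrade back:
 *
 *     qemu_co_rwlock_rdlock(&rw);
 *     if (needs_update(state)) {
 *         qemu_co_rwlock_upgrade(&rw);
 *         update(state);                  (exclusive access here)
 *         qemu_co_rwlock_downgrade(&rw);
 *     }
 *     qemu_co_rwlock_unlock(&rw);
 *
 * Note that qemu_co_rwlock_upgrade drops the read lock before sleeping,
 * so another writer may run and change the state before exclusive access
 * is granted; re-check the state after upgrading.
 */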