Lines Matching full:mutex
24 * The lock-free mutex implementation is based on OSv
25 * (core/lfmutex.cc, include/lockfree/mutex.hh).
65 * mutex's queue. This avoids the thundering herd effect. in qemu_co_queue_wait_impl()
120 * because pop_waiter() can only be called while mutex->handoff is zero.
123 * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
131 * In this case another iteration starts with mutex->handoff == 0;
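The comment fragments above (file lines 120-131) describe the "responsibility hand-off" protocol these functions borrow from OSv: an unlocker that finds no registered waiter publishes a non-zero ticket in mutex->handoff, and a locker that registers later claims the ticket with a compare-and-swap, thereby taking over the duty of popping and waking a waiter (possibly itself). The matches in this listing come from QEMU's util/qemu-coroutine-lock.c. As a minimal standalone illustration of the locker's side of that claim, here is a sketch using C11 atomics instead of QEMU's qatomic_* helpers; the name try_claim_handoff is mine, not QEMU's:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* The hand-off word in isolation: 0 means "no hand-off pending".
     * An unlocker publishes a non-zero ticket here (see the sketch after
     * the unlock lines below); this is the locker's side, which tries to
     * claim that ticket after pushing its wait record. */
    static atomic_uint handoff;

    /* Claiming the ticket (CAS back to 0) makes this locker responsible
     * for the next pop/wakeup; losing the CAS means the unlocker retracted
     * the ticket and keeps that responsibility itself. */
    static bool try_claim_handoff(void)
    {
        unsigned seen = atomic_load(&handoff);
        return seen != 0 && atomic_compare_exchange_strong(&handoff, &seen, 0);
    }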
142 static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w) in push_waiter() argument
145 QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next); in push_waiter()
148 static void move_waiters(CoMutex *mutex) in move_waiters() argument
151 QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push); in move_waiters()
155 QSLIST_INSERT_HEAD(&mutex->to_pop, w, next); in move_waiters()
159 static CoWaitRecord *pop_waiter(CoMutex *mutex) in pop_waiter() argument
163 if (QSLIST_EMPTY(&mutex->to_pop)) { in pop_waiter()
164 move_waiters(mutex); in pop_waiter()
165 if (QSLIST_EMPTY(&mutex->to_pop)) { in pop_waiter()
169 w = QSLIST_FIRST(&mutex->to_pop); in pop_waiter()
170 QSLIST_REMOVE_HEAD(&mutex->to_pop, next); in pop_waiter()
174 static bool has_waiters(CoMutex *mutex) in has_waiters() argument
176 return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push); in has_waiters()
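push_waiter(), move_waiters(), pop_waiter() and has_waiters() (file lines 142-176) implement the wait queue as two singly linked lists: any number of lockers push their CoWaitRecord onto from_push with a single atomic list-head update, and the one coroutine currently responsible for wakeups drains from_push into to_pop, reversing it so that waiters pop in FIFO order. Below is a standalone sketch of the same two-list pattern, a simplification of mine using C11 atomics rather than QEMU's QSLIST_*_ATOMIC macros, with the per-record payload omitted:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct wait_record {
        struct wait_record *next;
        /* payload (in QEMU: the waiting Coroutine *) omitted */
    };

    /* Any number of lockers push here concurrently...                  */
    static _Atomic(struct wait_record *) from_push;
    /* ...and only the single current "waker" ever touches this list.   */
    static struct wait_record *to_pop;

    static void push_waiter(struct wait_record *w)
    {
        struct wait_record *old = atomic_load(&from_push);
        do {
            w->next = old;
        } while (!atomic_compare_exchange_weak(&from_push, &old, w));
    }

    static struct wait_record *pop_waiter(void)
    {
        if (to_pop == NULL) {
            /* Grab everything pushed so far and reverse it into FIFO order. */
            struct wait_record *r = atomic_exchange(&from_push, NULL);
            while (r != NULL) {
                struct wait_record *next = r->next;
                r->next = to_pop;
                to_pop = r;
                r = next;
            }
        }
        if (to_pop == NULL) {
            return NULL;
        }
        struct wait_record *w = to_pop;
        to_pop = w->next;
        return w;
    }

    static bool has_waiters(void)
    {
        return to_pop != NULL || atomic_load(&from_push) != NULL;
    }

Pushing is safe from any thread, but pop_waiter() and to_pop are deliberately unsynchronized: the hand-off protocol guarantees that only one coroutine at a time is entitled to pop, which is the point of the comment at file line 120.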
179 void qemu_co_mutex_init(CoMutex *mutex) in qemu_co_mutex_init() argument
181 memset(mutex, 0, sizeof(*mutex)); in qemu_co_mutex_init()
184 static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co) in qemu_co_mutex_wake() argument
190 mutex->ctx = co->ctx; in qemu_co_mutex_wake()
195 CoMutex *mutex) in qemu_co_mutex_lock_slowpath() argument
201 trace_qemu_co_mutex_lock_entry(mutex, self); in qemu_co_mutex_lock_slowpath()
202 push_waiter(mutex, &w); in qemu_co_mutex_lock_slowpath()
205 * Add waiter before reading mutex->handoff. Pairs with qatomic_set_mb in qemu_co_mutex_lock_slowpath()
213 old_handoff = qatomic_read(&mutex->handoff); in qemu_co_mutex_lock_slowpath()
215 has_waiters(mutex) && in qemu_co_mutex_lock_slowpath()
216 qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { in qemu_co_mutex_lock_slowpath()
220 CoWaitRecord *to_wake = pop_waiter(mutex); in qemu_co_mutex_lock_slowpath()
225 mutex->ctx = ctx; in qemu_co_mutex_lock_slowpath()
229 qemu_co_mutex_wake(mutex, co); in qemu_co_mutex_lock_slowpath()
233 trace_qemu_co_mutex_lock_return(mutex, self); in qemu_co_mutex_lock_slowpath()
236 void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) in qemu_co_mutex_lock() argument
251 waiters = qatomic_cmpxchg(&mutex->locked, 0, 1); in qemu_co_mutex_lock()
254 if (qatomic_read(&mutex->ctx) == ctx) { in qemu_co_mutex_lock()
257 if (qatomic_read(&mutex->locked) == 0) { in qemu_co_mutex_lock()
262 waiters = qatomic_fetch_inc(&mutex->locked); in qemu_co_mutex_lock()
267 trace_qemu_co_mutex_lock_uncontended(mutex, self); in qemu_co_mutex_lock()
268 mutex->ctx = ctx; in qemu_co_mutex_lock()
270 qemu_co_mutex_lock_slowpath(ctx, mutex); in qemu_co_mutex_lock()
272 mutex->holder = self; in qemu_co_mutex_lock()
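qemu_co_mutex_lock() (file lines 236-272) first tries a fast path: mutex->locked counts pending lockers, so an uncontended acquisition is a single cmpxchg from 0 to 1. Before falling back to the wait queue it also spins briefly, but only while the holder runs in a different AioContext; a holder in the caller's own event loop cannot release the lock until the caller yields, so spinning on it would be pointless. A rough standalone sketch of that decision logic follows; names such as co_mutex_sketch and lock_fast_path are made up, and QEMU's real fast path is the code quoted above:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Only the fields the fast path needs; a stand-in, not QEMU's CoMutex. */
    struct co_mutex_sketch {
        atomic_uint locked;   /* 0 free, 1 held, >1 held with pending lockers */
        void *holder_ctx;     /* event loop (AioContext) of the current holder */
    };

    /* Returns true if the lock was taken on the fast path; false means the
     * caller must push a wait record and yield (the slow path). */
    static bool lock_fast_path(struct co_mutex_sketch *m, void *my_ctx)
    {
        unsigned seen;
        int spins = 0;

    retry:
        seen = 0;
        if (atomic_compare_exchange_strong(&m->locked, &seen, 1)) {
            return true;                    /* uncontended: 0 -> 1 */
        }

        /* Spin only while there is exactly one holder and it lives in
         * another event loop. */
        while (seen == 1 && ++spins < 1000) {
            if (m->holder_ctx == my_ctx) {
                break;
            }
            seen = atomic_load(&m->locked);
            if (seen == 0) {
                goto retry;                 /* released meanwhile, try again */
            }
        }

        /* Advertise ourselves as a pending locker.  If the lock was
         * released right here, the increment 0 -> 1 hands it to us. */
        return atomic_fetch_add(&m->locked, 1) == 0;
    }

The slow path is qemu_co_mutex_lock_slowpath() (file lines 195-233): it pushes a CoWaitRecord, re-reads mutex->handoff, and either claims a pending hand-off (potentially discovering that the popped waiter is itself, in which case it already owns the lock) or yields until qemu_co_mutex_wake() re-enters it.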
276 void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) in qemu_co_mutex_unlock() argument
280 trace_qemu_co_mutex_unlock_entry(mutex, self); in qemu_co_mutex_unlock()
282 assert(mutex->locked); in qemu_co_mutex_unlock()
283 assert(mutex->holder == self); in qemu_co_mutex_unlock()
286 mutex->ctx = NULL; in qemu_co_mutex_unlock()
287 mutex->holder = NULL; in qemu_co_mutex_unlock()
289 if (qatomic_fetch_dec(&mutex->locked) == 1) { in qemu_co_mutex_unlock()
295 CoWaitRecord *to_wake = pop_waiter(mutex); in qemu_co_mutex_unlock()
299 qemu_co_mutex_wake(mutex, to_wake->co); in qemu_co_mutex_unlock()
304 * mutex->locked was >1) but it hasn't yet put itself on the wait in qemu_co_mutex_unlock()
307 if (++mutex->sequence == 0) { in qemu_co_mutex_unlock()
308 mutex->sequence = 1; in qemu_co_mutex_unlock()
311 our_handoff = mutex->sequence; in qemu_co_mutex_unlock()
313 qatomic_set_mb(&mutex->handoff, our_handoff); in qemu_co_mutex_unlock()
314 if (!has_waiters(mutex)) { in qemu_co_mutex_unlock()
324 if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) { in qemu_co_mutex_unlock()
329 trace_qemu_co_mutex_unlock_return(mutex, self); in qemu_co_mutex_unlock()
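qemu_co_mutex_unlock() (file lines 276-329) clears the holder bookkeeping and, if the atomic decrement shows the mutex was uncontended, returns immediately. Otherwise some lock() is already pending and a wakeup must not be lost: the unlocker either pops and wakes a registered waiter, or, if the pending locker has not pushed its record yet, publishes a hand-off ticket and re-checks, retracting the ticket with a cmpxchg when a waiter shows up in the meantime. Here is a sketch of that loop, reusing pop_waiter()/has_waiters() from the queue sketch above; wake() is a hypothetical stand-in for qemu_co_mutex_wake():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct wait_record;                  /* from the queue sketch above */
    struct wait_record *pop_waiter(void);
    bool has_waiters(void);
    void wake(struct wait_record *w);    /* stand-in for qemu_co_mutex_wake() */

    static atomic_uint handoff;          /* 0 means "no hand-off pending" */
    static unsigned sequence;            /* only the unlocker touches this */

    /* Called when locked was decremented from a value > 1: someone is in
     * the middle of lock(), so a wakeup must not be lost. */
    static void unlock_contended(void)
    {
        for (;;) {
            struct wait_record *w = pop_waiter();
            unsigned ticket, seen;

            if (w != NULL) {
                wake(w);                 /* a registered waiter: wake it and stop */
                return;
            }

            /* The pending locker has not pushed its record yet.  Publish a
             * non-zero ticket it can claim once it does. */
            if (++sequence == 0) {
                sequence = 1;
            }
            ticket = sequence;
            atomic_store(&handoff, ticket);   /* QEMU: qatomic_set_mb() for ordering */

            if (!has_waiters()) {
                return;                  /* the locker will find and claim the ticket */
            }

            /* A record appeared after we published: try to take the ticket
             * back.  If the cmpxchg fails, the locker already claimed it and
             * now owns the wakeup; either way responsibility is not lost. */
            seen = ticket;
            if (!atomic_compare_exchange_strong(&handoff, &seen, 0)) {
                return;
            }
            /* Retracted it ourselves: loop and wake the waiter directly. */
        }
    }

Whichever side loses the cmpxchg race simply stops; the winner is the one that goes on to call pop_waiter(), which is why pop_waiter() never runs concurrently.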
340 qemu_co_mutex_init(&lock->mutex); in qemu_co_rwlock_init()
372 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_maybe_wake_one()
375 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_maybe_wake_one()
383 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_rdlock()
387 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_rdlock()
392 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_rdlock()
397 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_rdlock()
411 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_unlock()
424 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_downgrade()
436 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_wrlock()
439 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_wrlock()
444 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_wrlock()
454 qemu_co_mutex_lock(&lock->mutex); in qemu_co_rwlock_upgrade()
459 qemu_co_mutex_unlock(&lock->mutex); in qemu_co_rwlock_upgrade()
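The qemu_co_rwlock_* matches show that CoRwlock serializes all of its own bookkeeping with an embedded CoMutex: each of the rdlock/wrlock/unlock/upgrade/downgrade entry points takes lock->mutex around its state changes and drops it again before returning (or while waiting). As a usage illustration only, not code from QEMU, the surrounding names MyState, reader_co and writer_co being invented, a reader and a writer coroutine sharing one CoRwlock might look like this:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    typedef struct {
        CoRwlock lock;
        int value;
    } MyState;

    static void coroutine_fn reader_co(void *opaque)
    {
        MyState *s = opaque;
        int v;

        qemu_co_rwlock_rdlock(&s->lock);   /* may yield; bookkeeping guarded by lock->mutex */
        v = s->value;
        qemu_co_rwlock_unlock(&s->lock);
        (void)v;
    }

    static void coroutine_fn writer_co(void *opaque)
    {
        MyState *s = opaque;

        qemu_co_rwlock_wrlock(&s->lock);
        s->value++;
        /* Keep the lock but let readers back in, then drop it. */
        qemu_co_rwlock_downgrade(&s->lock);
        qemu_co_rwlock_unlock(&s->lock);
    }

    static void my_state_init(MyState *s)
    {
        qemu_co_rwlock_init(&s->lock);
        s->value = 0;
    }

Both entry points are coroutine_fn and would be launched from the owning AioContext, for example with qemu_coroutine_create() and qemu_coroutine_enter().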