Lines matching "locked" in kernel/locking/qspinlock_paravirt.h

91 		   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {  in pv_hybrid_queued_unfair_trylock()
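
Line 91 is the unfair-trylock fast path: the lock is taken by cmpxchg'ing the locked byte from 0 to _Q_LOCKED_VAL with acquire semantics. Below is a minimal userspace C11 sketch of that pattern; the struct and function names (byte_lock, byte_trylock) are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL 1U

/* Illustrative stand-in for the qspinlock's low "locked" byte. */
struct byte_lock {
	_Atomic uint8_t locked;
};

/*
 * Sketch of cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0:
 * succeed only if the byte was 0; acquire ordering keeps the critical
 * section from floating above the acquisition.
 */
static bool byte_trylock(struct byte_lock *lock)
{
	uint8_t expected = 0;

	return atomic_compare_exchange_strong_explicit(&lock->locked,
			&expected, (uint8_t)_Q_LOCKED_VAL,
			memory_order_acquire, memory_order_relaxed);
}
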
121 return !READ_ONCE(lock->locked) && in trylock_clear_pending()
142 * Try to clear pending bit & set locked bit in trylock_clear_pending()
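
Lines 121 and 142 are trylock_clear_pending(): when the pending bit is set and the lock byte has gone to 0, a single cmpxchg over the combined locked+pending halfword clears pending and sets locked in one atomic step. A hedged C11 sketch, assuming the qspinlock's little-endian layout (locked byte in bits 0-7, pending in bit 8); the names here are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL  1U		/* locked set, pending clear */
#define _Q_PENDING_VAL (1U << 8)	/* pending set, locked clear */

/* Illustrative 16-bit view of the locked + pending bytes. */
struct lp_lock {
	_Atomic uint16_t locked_pending;
};

static bool trylock_clear_pending_sketch(struct lp_lock *lock)
{
	uint16_t expected = _Q_PENDING_VAL;

	/* Cheap filter first, like the !READ_ONCE(lock->locked) test. */
	if (atomic_load_explicit(&lock->locked_pending,
				 memory_order_relaxed) & 0xff)
		return false;

	/*
	 * Pending -> locked in one shot: succeeds only while pending
	 * is set and the lock byte is 0, so ownership transfers with
	 * no intermediate state visible to other waiters.
	 */
	return atomic_compare_exchange_strong_explicit(
			&lock->locked_pending, &expected,
			(uint16_t)_Q_LOCKED_VAL,
			memory_order_acquire, memory_order_relaxed);
}
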
289 * Wait for node->locked to become true, halt the vcpu after a short spin.
302 if (READ_ONCE(node->locked)) in pv_wait_node()
312 * Order pn->state vs pn->locked thusly: in pv_wait_node()
314 * [S] pn->state = vcpu_halted [S] next->locked = 1 in pv_wait_node()
316 * [L] pn->locked [RmW] pn->state = vcpu_hashed in pv_wait_node()
322 if (!READ_ONCE(node->locked)) { in pv_wait_node()
336 * If the locked flag is still not set after wakeup, it is a in pv_wait_node()
343 !READ_ONCE(node->locked)); in pv_wait_node()
347 * By now our node->locked should be 1 and our caller will not actually in pv_wait_node()
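
Lines 289-347 are the waiter side, pv_wait_node(): spin on node->locked for a while, publish pn->state = vcpu_halted, and halt only after a full barrier and a re-check of node->locked. (The "MB" row of the comment diagram is source line 315 and is absent above only because it doesn't contain "locked".) A C11 sketch of the waiter's half of this store-buffer handshake, with the pv_wait() hypercall stubbed; names are illustrative.

#include <stdatomic.h>

enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

struct pv_node {
	_Atomic int locked;	/* set to 1 by our predecessor on handoff */
	_Atomic int state;	/* vcpu_running / vcpu_halted / vcpu_hashed */
};

/* Stub: in the kernel this is a hypercall that halts the vcpu. */
static void pv_wait_stub(_Atomic int *ptr, int val) { (void)ptr; (void)val; }

static void pv_wait_node_sketch(struct pv_node *pn)
{
	for (;;) {
		/* A bounded spin on node->locked would go here. */
		if (atomic_load_explicit(&pn->locked, memory_order_relaxed))
			return;

		/* [S] pn->state = vcpu_halted, then a full barrier ... */
		atomic_store_explicit(&pn->state, vcpu_halted,
				      memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);

		/* ... then [L] pn->locked: halt only if still unset. */
		if (!atomic_load_explicit(&pn->locked, memory_order_relaxed))
			pv_wait_stub(&pn->state, vcpu_halted);

		/* Back to running; a wakeup with locked still 0 is
		 * spurious and we just loop. The kernel uses cmpxchg
		 * here so a concurrent vcpu_hashed is preserved. */
		int expected = vcpu_halted;
		atomic_compare_exchange_strong_explicit(&pn->state,
				&expected, vcpu_running,
				memory_order_seq_cst, memory_order_relaxed);
	}
}

Either the waiter's re-check sees locked == 1 and skips the halt, or the waker's RmW on pn->state (pv_kick_node, next group) observes vcpu_halted and kicks; the paired full barriers rule out both sides missing.
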
354 * Called after setting next->locked = 1 when we're the lock owner.
367 * observe its next->locked value and advance itself. in pv_kick_node()
371 * The write to next->locked in arch_mcs_spin_unlock_contended() in pv_kick_node()
391 WRITE_ONCE(lock->locked, _Q_SLOW_VAL); in pv_kick_node()
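
Lines 354-391 are the waker's half, pv_kick_node(): it runs after the MCS handoff store next->locked = 1; its RmW on pn->state supplies the matching full barrier and succeeds only if the waiter published vcpu_halted, in which case the lock is hashed and the locked byte is rewritten to _Q_SLOW_VAL so the eventual unlock takes the kicking slowpath. A hedged C11 sketch with the hash table stubbed:

#include <stdatomic.h>
#include <stdint.h>

#define _Q_SLOW_VAL 3U

enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

struct pv_node {
	_Atomic int locked;
	_Atomic int state;
};

struct qspinlock_sketch {
	_Atomic uint8_t locked;
};

/* Stub: pv_hash() records lock -> node so the unlocker can find it. */
static void pv_hash_stub(struct qspinlock_sketch *lock, struct pv_node *pn)
{
	(void)lock; (void)pn;
}

static void pv_kick_node_sketch(struct qspinlock_sketch *lock,
				struct pv_node *pn)
{
	int expected = vcpu_halted;

	/*
	 * [S] next->locked = 1 was done by our caller; this seq_cst
	 * RmW provides the full barrier and tells us whether the
	 * waiter went (or will go) to sleep.
	 */
	if (!atomic_compare_exchange_strong_explicit(&pn->state,
			&expected, vcpu_hashed, memory_order_seq_cst,
			memory_order_relaxed))
		return;

	/* Record the waiter, then divert unlock into the slowpath. */
	pv_hash_stub(lock, pn);
	atomic_store_explicit(&lock->locked, _Q_SLOW_VAL,
			      memory_order_relaxed);
}
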
396 * Wait for l->locked to become clear and acquire the lock;
450 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL in pv_wait_head_or_lock()
452 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> in pv_wait_head_or_lock()
456 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { in pv_wait_head_or_lock()
462 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); in pv_wait_head_or_lock()
470 pv_wait(&lock->locked, _Q_SLOW_VAL); in pv_wait_head_or_lock()
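
Lines 396-470 are pv_wait_head_or_lock(): the queue head hashes the lock, then xchg's the locked byte to _Q_SLOW_VAL. Per the diagram at lines 450-452, the hash must be published before _Q_SLOW_VAL becomes visible so the unlocker can always find the node. The xchg doubles as a trylock: a return of 0 means the lock was actually free, so the head owns it and rewrites the byte to _Q_LOCKED_VAL; otherwise it sleeps on lock->locked. A C11 sketch of one iteration, with stubs and names that are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL 1U
#define _Q_SLOW_VAL   3U

struct pv_node { int cpu; };

struct qspinlock_sketch {
	_Atomic uint8_t locked;
};

/* Stubs for pv_hash()/unhash and the pv_wait() hypercall. */
static void pv_hash_stub(struct qspinlock_sketch *l, struct pv_node *n)
{ (void)l; (void)n; }
static void pv_unhash_stub(struct qspinlock_sketch *l) { (void)l; }
static void pv_wait_stub(_Atomic uint8_t *ptr, uint8_t val)
{ (void)ptr; (void)val; }

/* Returns true if we acquired the lock while marking it slow. */
static bool wait_head_step_sketch(struct qspinlock_sketch *lock,
				  struct pv_node *node)
{
	/* [S] <hash>: publish lock -> node before the slow marker. */
	pv_hash_stub(lock, node);

	/*
	 * [RmW] l->locked = _Q_SLOW_VAL: marks the lock for the slow
	 * unlock path and observes the old value in one atomic op.
	 */
	if (atomic_exchange_explicit(&lock->locked, _Q_SLOW_VAL,
				     memory_order_acquire) == 0) {
		/* Lock was free: we own it; undo marker and hash. */
		atomic_store_explicit(&lock->locked, _Q_LOCKED_VAL,
				      memory_order_relaxed);
		pv_unhash_stub(lock);
		return true;
	}

	/* Still held: sleep until the unlocker clears the byte. */
	pv_wait_stub(&lock->locked, _Q_SLOW_VAL);
	return false;
}
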
503 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) in __pv_queued_spin_unlock_slowpath() argument
507 if (unlikely(locked != _Q_SLOW_VAL)) { in __pv_queued_spin_unlock_slowpath()
533 smp_store_release(&lock->locked, 0); in __pv_queued_spin_unlock_slowpath()
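
Lines 503-533 are the unlock slowpath, entered only when the locked byte did not read _Q_LOCKED_VAL: it checks for _Q_SLOW_VAL, looks the waiter up in the hash, releases the lock with a release store, and kicks the waiter's vcpu. A hedged C11 sketch with the hash lookup and kick stubbed:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define _Q_SLOW_VAL 3U

struct pv_node { int cpu; };

struct qspinlock_sketch {
	_Atomic uint8_t locked;
};

/* Stubs for pv_unhash() and pv_kick() from the kernel code. */
static struct pv_node *pv_unhash_stub(struct qspinlock_sketch *lock)
{
	(void)lock;
	return NULL;	/* illustration only */
}
static void pv_kick_stub(int cpu) { (void)cpu; }

static void unlock_slowpath_sketch(struct qspinlock_sketch *lock,
				   uint8_t locked)
{
	struct pv_node *node;

	if (locked != _Q_SLOW_VAL)
		return;		/* the kernel WARNs: corrupted lock value */

	/*
	 * The failed cmpxchg on the fast path gave no ordering, so
	 * order the hash lookup after the read of _Q_SLOW_VAL (the
	 * kernel's smp_rmb()).
	 */
	atomic_thread_fence(memory_order_acquire);

	node = pv_unhash_stub(lock);

	/*
	 * Release store pairs with the waiter's loads of lock->locked:
	 * the critical section is visible before the lock reads free.
	 */
	atomic_store_explicit(&lock->locked, 0, memory_order_release);

	if (node)
		pv_kick_stub(node->cpu);
}
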
549 u8 locked; in __pv_queued_spin_unlock() local
556 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0); in __pv_queued_spin_unlock()
557 if (likely(locked == _Q_LOCKED_VAL)) in __pv_queued_spin_unlock()
560 __pv_queued_spin_unlock_slowpath(lock, locked); in __pv_queued_spin_unlock()
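
Lines 549-560 are the unlock fast path: one cmpxchg_release from _Q_LOCKED_VAL to 0. If the byte held anything else (e.g. _Q_SLOW_VAL planted for a halted waiter), the value read back is handed to the slowpath. A C11 sketch of that dispatch; the slowpath stub stands in for the sketch in the previous block.

#include <stdatomic.h>
#include <stdint.h>

#define _Q_LOCKED_VAL 1U

struct qspinlock_sketch {
	_Atomic uint8_t locked;
};

/* Stands in for __pv_queued_spin_unlock_slowpath(), sketched above. */
static void unlock_slowpath_stub(struct qspinlock_sketch *lock,
				 uint8_t locked)
{
	(void)lock; (void)locked;
}

static void pv_queued_spin_unlock_sketch(struct qspinlock_sketch *lock)
{
	uint8_t expected = _Q_LOCKED_VAL;

	/*
	 * Fast path: release the lock with one cmpxchg_release. On
	 * failure, "expected" now holds the byte's actual value
	 * (e.g. _Q_SLOW_VAL) for the slowpath to inspect.
	 */
	if (atomic_compare_exchange_strong_explicit(&lock->locked,
			&expected, 0, memory_order_release,
			memory_order_relaxed))
		return;

	unlock_slowpath_stub(lock, expected);
}
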