Lines matching "owner" in kernel/locking/rtmutex.c (Linux kernel), grouped by function.

lock->owner state tracking (file header comment):

 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state:
 *
 *  owner          bit0
 *  NULL           0      lock is free (fast acquire possible)
 *  NULL           1      lock is free, has waiters, and the top waiter
 *                        is going to take the lock
 *  taskpointer    0      lock is held (fast release possible)
 *  taskpointer    1      lock is held and has waiters
 *
 * The fast atomic compare-exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * To prevent any fast path cmpxchg on the lock, we need to set bit0
 * before looking at the lock; the owner may be NULL in this small
 * window, hence this can be a transitional state.
 *
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
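
The table above relies on every owner word being a task pointer with bit 0 overloaded as the waiters flag. A minimal userspace sketch of that encoding, assuming only that task pointers are at least 2-byte aligned so bit 0 is always free (all names here are illustrative, not the kernel's):

        #include <stdbool.h>

        #define RT_MUTEX_HAS_WAITERS    1UL     /* bit 0 of the owner word */

        struct task_struct;                     /* opaque for this sketch */

        /* Pack a task pointer and the waiters flag into one word. */
        static inline unsigned long owner_encode(struct task_struct *task,
                                                 bool waiters)
        {
                return (unsigned long)task | (waiters ? RT_MUTEX_HAS_WAITERS : 0);
        }

        /* Recover the task pointer: mask bit 0 off. */
        static inline struct task_struct *owner_task(unsigned long word)
        {
                return (struct task_struct *)(word & ~RT_MUTEX_HAS_WAITERS);
        }

        /* A free lock is the NULL task, with or without the waiters bit. */
        static inline bool owner_is_free(unsigned long word)
        {
                return owner_task(word) == NULL;
        }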

rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner):
        unsigned long val = (unsigned long)owner;

rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner):
        /*
         * lock->wait_lock is held but explicit acquire semantics are needed
         * for a new lock owner so WRITE_ONCE is insufficient.
         */
        xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));

rt_mutex_clear_owner():
        WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
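
The asymmetry above is the point: installing a new owner needs ACQUIRE ordering against the previous critical section, while clearing the owner runs under wait_lock, whose unlock supplies the release. A hedged C11 sketch of the same idea (not the kernel's implementation):

        #include <stdatomic.h>

        static _Atomic unsigned long owner_word;        /* sketch state */

        /* Locking side: the swap is an ACQUIRE so the new owner observes
         * everything the previous owner published before releasing. */
        static void set_owner(unsigned long encoded)
        {
                atomic_exchange_explicit(&owner_word, encoded,
                                         memory_order_acquire);
        }

        /* Unlocking side: serialized by wait_lock in the kernel, so a
         * plain store suffices; release ordering comes from the
         * subsequent wait_lock unlock. */
        static void clear_owner(unsigned long encoded_null)
        {
                atomic_store_explicit(&owner_word, encoded_null,
                                      memory_order_relaxed);
        }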

clear_rt_mutex_waiters():
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);

fixup_rt_mutex_waiters():
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        /*
         * The rbtree has no waiters enqueued; now make sure that
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen (condensed; l->owner starts as T1, T2 and
         * T3 block on l and are then signalled away, leaving the wait
         * list empty while both CPUs still see HAS_WAITERS):
         *
         * CPU 1                          CPU 2
         * fixup_rt_mutex_waiters()
         *   owner = l->owner & ~HAS_WAITERS;
         *   l->owner = owner;
         *     ==> l->owner = T1
         *                                fixup_rt_mutex_waiters()
         *                                  owner = l->owner & ~HAS_WAITERS;
         * cmpxchg(l->owner, T1, NULL)      (T1 unlocks on CPU 0)
         *   ===> Success (l->owner = NULL)
         *                                  l->owner = owner;
         *                                    ==> l->owner = T1, unlock lost
         *
         * All tasks fiddling with the waiters bit are serialized by
         * l->wait_lock, and while the bit is set nothing can change
         * l->owner either, so the simple RMW below is safe.
         */
        owner = READ_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS) {
                /*
                 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
                 * why xchg_acquire() is used for updating owner for
                 * locking and WRITE_ONCE() for unlocking.
                 */
                if (acquire_lock)
                        xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
                else
                        WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
        }
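
Condensed, the fix is: only touch lock->owner when the waiters bit is still set, because a set bit excludes any fastpath cmpxchg. A sketch of that guard in C11 atomics (illustrative names, assuming the encoding from the first sketch):

        #include <stdatomic.h>
        #include <stdbool.h>

        #define RT_MUTEX_HAS_WAITERS 1UL

        /* Clear a stale HAS_WAITERS bit, but only if it is still set: a
         * set bit blocks every fastpath cmpxchg on the owner word, so
         * the read-modify-write below cannot race with an unlock. */
        static void fixup_waiters_bit(_Atomic unsigned long *word, bool acquire)
        {
                unsigned long owner = atomic_load_explicit(word,
                                                           memory_order_relaxed);

                if (!(owner & RT_MUTEX_HAS_WAITERS))
                        return;         /* nothing stale to clean up */

                if (acquire)    /* acquisition path wants ACQUIRE ordering */
                        atomic_exchange_explicit(word,
                                                 owner & ~RT_MUTEX_HAS_WAITERS,
                                                 memory_order_acquire);
                else            /* unlock path: wait_lock provides ordering */
                        atomic_store_explicit(word,
                                              owner & ~RT_MUTEX_HAS_WAITERS,
                                              memory_order_relaxed);
        }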

rt_mutex_cmpxchg_acquire():
        return try_cmpxchg_acquire(&lock->owner, &old, new);

rt_mutex_cmpxchg_release():
        return try_cmpxchg_release(&lock->owner, &old, new);

mark_rt_mutex_waiters():
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg_relaxed(p, owner,
                                 owner | RT_MUTEX_HAS_WAITERS) != owner);
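
The loop above is an open-coded atomic OR: retry the cmpxchg until no concurrent owner change slips in between the read and the update. In C11 the same effect is a single fetch_or (a sketch, not the kernel code; the kernel additionally orders the update against the subsequent owner check with a full barrier):

        #include <stdatomic.h>

        #define RT_MUTEX_HAS_WAITERS 1UL

        /* Equivalent of the cmpxchg_relaxed() loop: atomically OR the
         * waiters bit into the owner word without disturbing the task
         * pointer bits. */
        static void mark_waiters(_Atomic unsigned long *owner_word)
        {
                atomic_fetch_or_explicit(owner_word, RT_MUTEX_HAS_WAITERS,
                                         memory_order_relaxed);
        }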

unlock_rt_mutex_safe():
        struct task_struct *owner = rt_mutex_owner(lock);

        /*
         * Clear the waiters bit, drop lock->wait_lock, then try to unlock
         * with cmpxchg. If a new waiter sneaks in after wait_lock is
         * dropped there are two cases:
         *
         *   cmpxchg(p, owner, 0) == owner: the unlock won the race; the
         *     new waiter marks the lock afterwards and acquires it via
         *     the slow path.
         *
         *   cmpxchg(p, owner, 0) != owner: the waiter set HAS_WAITERS
         *     first, the cmpxchg fails, and the unlocker retries the
         *     slow path.
         */
        return rt_mutex_cmpxchg_release(lock, owner, NULL);
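
The final cmpxchg is the whole safety argument: it can only succeed while bit 0 is still clear. A hedged C11 sketch of that last step (the caller is assumed to have already cleared the waiters bit and dropped wait_lock):

        #include <stdatomic.h>
        #include <stdbool.h>

        /* Try to swing the owner word from "me" to NULL. Failure means a
         * waiter set HAS_WAITERS in the gap; the caller must retry via
         * the slow path. */
        static bool unlock_cmpxchg(_Atomic unsigned long *owner_word,
                                   unsigned long me /* encoded, bit0 clear */)
        {
                unsigned long expected = me;

                return atomic_compare_exchange_strong_explicit(
                                owner_word, &expected, 0UL,
                                memory_order_release, memory_order_relaxed);
        }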

mark_rt_mutex_waiters() (slow-path-only variant, lock->wait_lock held):
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);

/* Simple slow-path-only version: lock->owner is protected by lock->wait_lock. */
unlock_rt_mutex_safe():
        lock->owner = NULL;

rt_mutex_adjust_prio_chain() kerneldoc and chain-walk scheme:
 * @task:        the task owning the mutex (owner) for which a chain walk
 *               is probably needed
 * @next_lock:   the mutex on which the owner of @orig_lock was blocked
 *               before we dropped its pi_lock (used to detect a race
 *               window between grabbing the next lock and releasing the
 *               pi_lock)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *               its priority to the mutex owner (can be NULL in the case
 *               where the top waiter has gone away and we are actually
 *               deboosting the owner)
 *
 * Step [10] of the documented walk: task = owner(lock);   [L]
 * ([L] meaning the step runs with lock->wait_lock held.)
 *
 * Where P1 is the blocking task and P2 is the lock owner; going up one
 * step the owner becomes the next blocked task, etc.
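
Stripped of locking, refcounting, and requeue details, the documented walk is a loop that follows owner -> blocked-on-lock edges and propagates the boosted priority. A heavily simplified sketch (all types and helpers here are illustrative, not the kernel's):

        struct lock;

        struct task {
                int prio;                 /* lower value = higher priority */
                struct lock *blocked_on;  /* lock this task waits on, or NULL */
        };

        struct lock {
                struct task *owner;
                struct task *top_waiter;  /* highest-priority waiter */
        };

        /* Propagate a priority boost along the blocked-on chain: each
         * lock owner inherits the priority of its top waiter; if that
         * owner is itself blocked, continue one step up the chain. */
        static void adjust_prio_chain(struct lock *lock)
        {
                while (lock && lock->owner) {
                        struct task *owner = lock->owner;
                        int top_prio = lock->top_waiter->prio;

                        if (owner->prio <= top_prio)
                                break;          /* nothing to inherit: end of walk */

                        owner->prio = top_prio;    /* boost the owner ([11]) */
                        lock = owner->blocked_on;  /* next lock in the chain */
                }
        }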

rt_mutex_adjust_prio_chain() body comments:
        /*
         * We are fully preemptible between steps: everything may have
         * changed under us, e.g. the previous owner of the lock might
         * have released the lock in the meantime.
         */
        ...
        /* If there is no owner of the lock, end of chain. */
        ...
        /* [10] Grab the next task, i.e. owner of @lock */
        ...
        /*
         * [12] Store whether owner is blocked itself; the decision is
         * made after dropping the locks.
         */
        ...
        /* If owner is not blocked, end of chain. */
        ...
        /*
         * [8] Release the (blocking) task in preparation for
         * taking the owner task in [10].
         */
        ...
        /*
         * [9] We must abort the chain walk if there is no lock owner even
         * in the deadlock detection case, as there is nothing to follow;
         * this is the end of the chain we are walking.
         */
        ...
        /*
         * [10] Grab the next task, i.e. the owner of @lock.
         *
         * Per holding lock->wait_lock and checking for !owner above, there
         * must be an owner and it cannot go away.
         */
        ...
        /*
         * The waiter became the new top (highest priority) waiter on the
         * lock: replace the previous top waiter in the owner task's pi
         * waiters tree with this waiter and adjust the priority of the
         * owner.
         */
        ...
        /*
         * The waiter was the top waiter on the lock but is no longer:
         * replace it in the owner task's pi waiters tree with the new top
         * waiter and adjust the priority of the owner.
         */

try_to_take_rt_mutex():
        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
         * other tasks which try to modify @lock into the slow path,
         * where they serialize on @lock->wait_lock.
         *
         * As a consequence of the transient bit:
         *
         * - There is a lock owner: the caller must fixup the
         *   transient state if it does not acquire the lock.
         *
         * If @lock has an owner, give up.
         *
         * Finish the lock acquisition. @task is the new owner. If other
         * waiters exist, the highest-priority waiter has to be inserted
         * into @task's pi_waiters tree.
         */
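
Putting the two comments together: the taker first forces everyone into the slow path by setting the waiters bit, bails if an owner exists, and on success installs itself (which also replaces the transient bit). A compressed sketch under the same assumptions as the earlier ones; in the kernel this runs under lock->wait_lock, which is what makes the check-then-set sequence safe:

        #include <stdatomic.h>
        #include <stdbool.h>

        #define RT_MUTEX_HAS_WAITERS 1UL

        static bool try_take(_Atomic unsigned long *owner_word,
                             unsigned long me /* encoded owner, bit0 clear */)
        {
                /* Force concurrent lockers into the slow path first. */
                atomic_fetch_or_explicit(owner_word, RT_MUTEX_HAS_WAITERS,
                                         memory_order_relaxed);

                /* If the lock has an owner, give up; the caller must fix
                 * up the transient waiters bit (see the fixup sketch). */
                if (atomic_load_explicit(owner_word, memory_order_relaxed)
                    & ~RT_MUTEX_HAS_WAITERS)
                        return false;

                /* Finish the acquisition: install ourselves as owner,
                 * which overwrites the transient bit in one ACQUIRE swap. */
                atomic_exchange_explicit(owner_word, me, memory_order_acquire);
                return true;
        }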

task_blocks_on_rt_mutex():
        struct task_struct *owner = rt_mutex_owner(lock);
        ...
        /* A task cannot block on a lock it already owns; ww_mutex
         * contexts are the exception. */
        if (owner == task && !(build_ww_mutex() && ww_ctx))
                return -EDEADLK;
        ...
        if (!owner)
                return 0;

        raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                rt_mutex_adjust_prio(lock, owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        }
        ...
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain walk.
         */
        ...
        /*
         * The owner can't disappear while holding a lock, so the owner
         * struct is protected by wait_lock; the reference is dropped in
         * rt_mutex_adjust_prio_chain().
         */
        get_task_struct(owner);
        ...
        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);
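
The dequeue_pi/enqueue_pi pair above is the boost handoff: a new top waiter displaces the old one in the owner's PI accounting before the owner's effective priority is recomputed. An illustrative sketch of just that step, reusing the struct task / struct lock types from the chain-walk sketch above:

        /* Replace the owner's view of the lock's top waiter, then
         * recompute the owner's effective priority (the moral
         * equivalent of dequeue_pi + enqueue_pi + adjust_prio). */
        static void pi_requeue_and_boost(struct lock *lk, struct task *new_top)
        {
                lk->top_waiter = new_top;

                if (lk->owner->prio > new_top->prio)
                        lk->owner->prio = new_top->prio;
        }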

mark_wakeup_next_waiter():
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

rt_mutex_slowtrylock():
        /*
         * If the lock already has an owner, we fail to get the lock.
         * This can be checked without taking @lock->wait_lock, as it is
         * only read and this is a trylock anyway.
         */
        ...
        /*
         * The mutex currently has no owner. Lock the wait lock and try
         * to acquire the lock.
         */

rt_mutex_slowunlock():
        /*
         * We must be careful here if the fast lock/unlock operations are
         * enabled: if we have no waiters queued, we cannot set owner to
         * NULL here, because a fast-path acquire/release pair could slip
         * in and free the containing object before we drop wait_lock:
         *
         *      foo->lock->owner = NULL;
         *                      rtmutex_lock(foo->lock);   <- fast acquire
         *                      rtmutex_unlock(foo->lock); <- fast release
         *                      kfree(foo);
         *      raw_spin_unlock(foo->lock->wait_lock);     <- use after free
         *
         * So with the fast path enabled we do instead:
         *
         *      owner = rt_mutex_owner(lock);
         *      clear_rt_mutex_waiters(lock);
         *      raw_spin_unlock(&lock->wait_lock);
         *      if (cmpxchg(&lock->owner, owner, 0) == owner)
         *              return;
         *      goto retry;
         *
         * The fast-path-disabled variant is simple, as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *      lock->owner = NULL;
         *      raw_spin_unlock(&lock->wait_lock);
         */

rtmutex_spin_on_owner(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter,
                      struct task_struct *owner):
        /* If owner changed, trylock again. */
        if (owner != rt_mutex_owner(lock))
                break;
        /*
         * Ensure that @owner is dereferenced after checking that
         * the lock owner still matches @owner. If that fails,
         * @owner might point to freed memory. If it still matches,
         * the rcu_read_lock() ensures the memory stays valid.
         */
        ...
        /*
         * Stop spinning when:
         *  - the lock owner has been scheduled out
         *  - current is no longer the top waiter
         *  - current is requested to reschedule
         *  - the VCPU on which the owner runs is preempted
         */
        if (!owner_on_cpu(owner) || need_resched() ||
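
The pattern is classic adaptive spinning: keep burning cycles only while the lock is likely to be released soon, i.e. while the same owner is still running on a CPU. A hedged userspace sketch (owner_running stands in for owner_on_cpu(); sched_yield() stands in for cpu_relax()):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <sched.h>

        /* Returns true if the owner changed (retry the trylock), false
         * if the owner blocked or was preempted (go to sleep instead). */
        static bool spin_on_owner(_Atomic(void *) *owner_word, void *owner,
                                  bool (*owner_running)(void *))
        {
                while (atomic_load_explicit(owner_word,
                                            memory_order_relaxed) == owner) {
                        if (!owner_running(owner))
                                return false;   /* owner scheduled out */
                        sched_yield();          /* stand-in for cpu_relax() */
                }
                return true;                    /* owner changed: trylock again */
        }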

rtmutex_spin_on_owner(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter,
                      struct task_struct *owner):
        (non-SMP stub: spinning is pointless on UP, so the slow path
         always blocks instead)

remove_waiter():
        struct task_struct *owner = rt_mutex_owner(lock);
        ...
        /*
         * Only update priority if the waiter was the highest-priority
         * waiter of the lock and there is an owner to update.
         */
        if (!owner || !is_top_waiter)
                return;

        raw_spin_lock(&owner->pi_lock);
        rt_mutex_dequeue_pi(owner, waiter);
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
        rt_mutex_adjust_prio(lock, owner);

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
        raw_spin_unlock(&owner->pi_lock);

        /* Don't walk the chain if the owner task is not blocked itself. */
        if (!next_lock)
                return;

        get_task_struct(owner);
        ...
        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);

rt_mutex_slowlock_block():
        struct task_struct *owner;
        ...
        /* Spin only if we are the top waiter; otherwise block. */
        if (waiter == rt_mutex_top_waiter(lock))
                owner = rt_mutex_owner(lock);
        else
                owner = NULL;
        ...
        if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
                schedule();

rtlock_slowlock_locked():
        struct task_struct *owner;
        ...
        /* Same adaptive spin as above, for the rtlock variant. */
        if (&waiter == rt_mutex_top_waiter(lock))
                owner = rt_mutex_owner(lock);
        else
                owner = NULL;
        ...
        if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
                schedule_rtlock();
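
For completeness, the consumer view of all this machinery is small. A sketch of typical in-kernel usage via the public API from <linux/rtmutex.h> (kernel-module context assumed; most code should use the plain mutex/spinlock APIs, which map onto rt_mutex under PREEMPT_RT):

        #include <linux/rtmutex.h>

        static DEFINE_RT_MUTEX(demo_lock);

        static void demo(void)
        {
                rt_mutex_lock(&demo_lock);      /* may boost the owner if we block */
                /* critical section: current is encoded in demo_lock.owner */
                rt_mutex_unlock(&demo_lock);    /* hands off to the top waiter */

                if (rt_mutex_trylock(&demo_lock)) {     /* fails if an owner exists */
                        rt_mutex_unlock(&demo_lock);
                }
        }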