mutex.c: 0c3c0f0d6e56422cef60a33726d062e9923005c3 → 1e820c9608eace237e2c519d8fd9074aec479d81
 /*
  * kernel/locking/mutex.c
  *
  * Mutexes: blocking mutual exclusion locks
  *
  * Started by Ingo Molnar:
  *
  * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>

--- 32 unchanged lines hidden ---

  */
 # undef __mutex_slowpath_needs_to_unlock
 # define __mutex_slowpath_needs_to_unlock() 0
 #else
 # include "mutex.h"
 # include <asm/mutex.h>
 #endif

-/*
- * A negative mutex count indicates that waiters are sleeping waiting for the
- * mutex.
- */
-#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)
-
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
         atomic_set(&lock->count, 1);
         spin_lock_init(&lock->wait_lock);
         INIT_LIST_HEAD(&lock->wait_list);
         mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER

--- 415 unchanged lines hidden ---

          * reschedule now, before we try-lock the mutex. This avoids getting
          * scheduled out right after we obtained the mutex.
          */
         if (need_resched())
                 schedule_preempt_disabled();
 #endif
         spin_lock_mutex(&lock->wait_lock, flags);

-        /* once more, can we acquire the lock? */
-        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+        /*
+         * Once more, try to acquire the lock. Only try-lock the mutex if
+         * lock->count >= 0 to reduce unnecessary xchg operations.
+         */
+        if (atomic_read(&lock->count) >= 0 && (atomic_xchg(&lock->count, 0) == 1))
                 goto skip_wait;

         debug_mutex_lock_common(lock, &waiter);
         debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

         /* add waiting tasks to the end of the waitqueue (FIFO): */
         list_add_tail(&waiter.list, &lock->wait_list);
         waiter.task = task;

         lock_contended(&lock->dep_map, ip);

         for (;;) {
                 /*
                  * Lets try to take the lock again - this is needed even if
                  * we get here for the first time (shortly after failing to
                  * acquire the lock), to make sure that we get a wakeup once
                  * it's unlocked. Later on, if we sleep, this is the
                  * operation that gives us the lock. We xchg it to -1, so
                  * that when we release the lock, we properly wake up the
-                 * other waiters:
+                 * other waiters. We only attempt the xchg if the count is
+                 * non-negative in order to avoid unnecessary xchg operations:
                  */
-                if (MUTEX_SHOW_NO_WAITER(lock) &&
+                if (atomic_read(&lock->count) >= 0 &&
                     (atomic_xchg(&lock->count, -1) == 1))
                         break;

                 /*
                  * got a signal? (This code gets eliminated in the
                  * TASK_UNINTERRUPTIBLE case.)
                  */
                 if (unlikely(signal_pending_state(state, task))) {

--- 411 unchanged lines hidden ---
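
Note: the change on both sides of this diff is the same pattern applied twice: read the count first and only issue the atomic_xchg() when the count is still non-negative, since the exchange dirties the lock's cache line even when it fails. Below is a minimal standalone sketch of that check-before-xchg idea using C11 atomics; the names (demo_mutex, demo_trylock) are hypothetical and this is an illustration, not the kernel implementation, though it mirrors the count encoding the kernel code relies on (1: unlocked, 0: locked with no waiters, negative: waiters queued).

/* Illustrative sketch only -- not kernel code.  demo_mutex and
 * demo_trylock are made-up names; the pattern is the "check before
 * xchg" guard from the diff, expressed with C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_mutex {
        atomic_int count;       /* 1: unlocked, 0: locked, <0: waiters */
};

/* Only issue the (cache-line dirtying) exchange when the lock still
 * looks acquirable, i.e. count >= 0 -- the same guard the patch puts
 * in front of both atomic_xchg() calls in the slowpath. */
static bool demo_trylock(struct demo_mutex *lock)
{
        if (atomic_load(&lock->count) < 0)
                return false;   /* contended: skip the useless exchange */

        return atomic_exchange(&lock->count, 0) == 1;
}

int main(void)
{
        struct demo_mutex lock = { .count = 1 };

        printf("first trylock:  %d\n", demo_trylock(&lock));   /* 1: acquired */
        printf("second trylock: %d\n", demo_trylock(&lock));   /* 0: already held */
        return 0;
}

In the kernel slowpath the same guard appears twice: once before queueing the waiter (xchg to 0) and once inside the wait loop (xchg to -1, so the eventual unlock knows it must wake the remaining waiters).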