/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
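
/*
 * Illustrative note (inferred from the bit layout documented in
 * asm-generic/qspinlock_types.h): during a lock handoff the locked byte
 * may briefly read 0 while the tail bits still record a queued waiter,
 * so testing the whole word rather than just _Q_LOCKED_VAL is what keeps
 * this check conservative.
 */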

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid the lockref code stealing
 *      the lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
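
/*
 * Illustrative sketch of why the check above takes the lock by value: a
 * lockref-style caller works on a snapshot of a combined lock+count word
 * and tests the copied lock before updating the whole thing atomically.
 * The names below (toy_lockref, toy_lockref_fast_path) are hypothetical
 * stand-ins, not the real struct lockref:
 *
 *	struct toy_lockref {
 *		union {
 *			u64 lock_count;
 *			struct {
 *				struct qspinlock lock;
 *				int count;
 *			};
 *		};
 *	};
 *
 *	static bool toy_lockref_fast_path(struct toy_lockref *ref)
 *	{
 *		struct toy_lockref old;
 *
 *		old.lock_count = READ_ONCE(ref->lock_count);	// snapshot
 *
 *		// Only touch the count locklessly while nobody holds
 *		// or waits on the lock in our snapshot.
 *		return queued_spin_value_unlocked(old.lock);
 *	}
 */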

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
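
/*
 * Why masking off _Q_LOCKED_MASK means "contended": per the bit layout
 * documented in asm-generic/qspinlock_types.h (for NR_CPUS < 16K), the
 * 32-bit lock word is split as
 *
 *	 0- 7: locked byte
 *	    8: pending
 *	 9-15: unused
 *	16-17: tail index
 *	18-31: tail CPU (+1)
 *
 * so any bits left after clearing the locked byte can only belong to a
 * pending or queued waiter.
 */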

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
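
/*
 * Illustrative usage (hypothetical caller, not part of this header):
 *
 *	if (queued_spin_trylock(&lock)) {
 *		// ... critical section ...
 *		queued_spin_unlock(&lock);
 *	} else {
 *		// take a fallback path instead of spinning
 *	}
 *
 * The plain atomic_read() up front keeps a failing trylock read-only,
 * so contending CPUs do not bounce the cache line around with doomed
 * cmpxchg attempts.
 */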

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
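
/*
 * Illustrative flow (a sketch, not new behaviour): the inline cmpxchg
 * handles only the uncontended 0 -> _Q_LOCKED_VAL transition; on any
 * other value the observed lock word is passed to
 * queued_spin_lock_slowpath(), which queues this CPU on a per-CPU MCS
 * node and spins on its own cache line until it reaches the head of the
 * queue. A caller simply pairs the two entry points:
 *
 *	queued_spin_lock(&lock);
 *	// ... critical section ...
 *	queued_spin_unlock(&lock);
 */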

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
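
/*
 * Note on the byte store above: because the locked byte occupies its own
 * byte within the lock word, a store-release of 0 to lock->locked drops
 * the lock without disturbing the pending and tail bits that waiters
 * still rely on. Architectures where such a partial store is not cheap
 * or not ordered strongly enough define their own queued_spin_unlock(),
 * which is why the generic one is wrapped in #ifndef.
 */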

#ifndef virt_spin_lock
/*
 * virt_spin_lock - hook for virtualized environments to bypass queueing
 *
 * The default is a no-op that reports "not handled"; architectures may
 * override it (see the sketch below).
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
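
/*
 * Illustrative override (a sketch modeled on the x86 approach; the
 * virt_running_in_guest() helper is hypothetical): a guest kernel may
 * prefer a simple test-and-set loop over strict FIFO queueing, because
 * a preempted vCPU that holds a queue slot would stall every waiter
 * behind it:
 *
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!virt_running_in_guest())
 *			return false;	// fall back to normal queueing
 *
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;	// lock acquired here; skip the queue
 *	}
 */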

/*
 * Remap the architecture-specific spinlock functions to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */