/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>
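
/*
 * Quick orientation (the authoritative layout lives in
 * asm-generic/qspinlock_types.h): the whole lock state is packed into the
 * single atomic word @val.  With fewer than 16K possible CPUs the layout is:
 *
 *	 0- 7: locked byte (_Q_LOCKED_MASK)
 *	    8: pending bit
 *	16-17: tail index (which per-CPU MCS node slot is queued)
 *	18-31: tail CPU + 1
 *
 * The helpers below only need three distinctions: all-zero (unlocked and
 * uncontended), the locked byte, and anything above the locked byte
 * (a pending or queued waiter).
 */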

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to keep the lockref code from stealing
 *      the lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
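
/*
 * Purely for illustration (not part of this API; the field names below are
 * approximate): because the lock is passed by value, a lockref-style caller
 * can test a snapshotted copy of a larger structure without touching the
 * live lock word, roughly:
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	if (arch_spin_value_unlocked(old.lock.rlock.raw_lock))
 *		... try a lockless cmpxchg of the whole { lock, count } pair ...
 *
 * Any pending or queued waiter makes @val non-zero, so such a snapshot reads
 * as "locked" and the lockless path backs off, as the N.B. above requires.
 */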

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
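	/*
	 * Everything above the locked byte (the pending bit and the MCS
	 * tail encoding) is covered by ~_Q_LOCKED_MASK, so a non-zero
	 * result means at least one other CPU is waiting for the lock.
	 */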
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

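	/*
	 * Peeking at the lock word first avoids issuing a cmpxchg that is
	 * certain to fail when the lock is visibly held; a failed cmpxchg
	 * would typically still pull the cacheline in exclusive state.
	 */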
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

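/*
 * The slowpath lives in kernel/locking/qspinlock.c: broadly, it spins on the
 * pending bit for the lightly contended case and otherwise queues the CPU on
 * a per-CPU MCS node, spinning locally until it reaches the head of the
 * queue.  @val is the lock value the caller observed when its fastpath
 * cmpxchg failed.
 */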
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

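	/*
	 * The uncontended fastpath failed: atomic_try_cmpxchg_acquire()
	 * has updated @val with the value it observed in the lock word,
	 * which is handed to the slowpath as its starting point.
	 */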
	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
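	/*
	 * Only the locked byte is cleared.  The pending bit and the MCS
	 * tail live in the upper bytes of @val and belong to the waiters,
	 * so a plain byte-sized store keeps them intact while publishing
	 * all prior critical-section stores via the release.
	 */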
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
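/*
 * Hook for virtualized guests, called from the slowpath.  This generic stub
 * declines (returns false) so the native queued path is always used.  An
 * architecture may override it to fall back to a simpler test-and-set lock
 * when running under a hypervisor, where a preempted queue waiter could
 * otherwise stall every CPU queued behind it; returning true means the lock
 * was acquired here and the queued slowpath must not be entered.
 */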
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap the architecture-specific spinlock entry points to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
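
/*
 * These arch_spin_*() hooks are what the generic locking layer builds on:
 * the raw_spin_*() / spin_*() wrappers eventually call them on the
 * arch_spinlock_t embedded in each raw_spinlock_t.
 */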

#endif /* __ASM_GENERIC_QSPINLOCK_H */