/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() being RCsc (or no
 * weaker than RCtso if you're Power), whereas regular code only expects
 * atomic_t to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully
 * to ensure they all provide forward progress. Many atomic operations may
 * default to cmpxchg() loops, which will not have good forward-progress
 * properties on LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also relies heavily on mixed-size atomic operations; specifically
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward-progress guarantees mentioned above (see the illustrative sketch
 * after the includes below).
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

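/*
 * The layout of the lock word (locked byte, pending bit and waiter tail) is
 * defined in asm-generic/qspinlock_types.h.
 *
 * Illustrative sketch only (names hypothetical, not code from this header):
 * an LL/SC architecture without a native 16-bit xchg typically has to emulate
 * it with a 32-bit compare-and-swap loop along these lines, which is exactly
 * the kind of construct whose forward progress needs auditing:
 *
 *	static inline u16 xchg16_emulated(u32 *word, int shift, u16 newval)
 *	{
 *		u32 old, new, mask = 0xffffU << shift;
 *
 *		old = READ_ONCE(*word);
 *		do {
 *			new = (old & ~mask) | ((u32)newval << shift);
 *		} while (!try_cmpxchg_relaxed(word, &old, new));
 *
 *		return (old & mask) >> shift;
 *	}
 */
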
#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to prevent the lockref code from stealing
 * the lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val.counter;
}

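/*
 * Illustrative sketch only, not code from this header: lockref-style users
 * snapshot the combined lock+count word and only attempt their cmpxchg-based
 * update while the snapshot reads as unlocked (field names approximate; see
 * lib/lockref.c for the real thing):
 *
 *	struct lockref old;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		struct lockref new = old;
 *
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count, new.lock_count))
 *			return;		-- updated without taking the lock
 *		cpu_relax();
 *	}
 */
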
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

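/*
 * A note on the shape of queued_spin_trylock() above: reading the lock word
 * first and bailing out early avoids issuing a cmpxchg (and pulling the
 * cacheline in exclusive state) when the lock is visibly held, which helps
 * under contention.  Illustrative use via the arch_spin_*() mapping at the
 * bottom of this file, with a hypothetical arch_spinlock_t 'busy_lock':
 *
 *	if (arch_spin_trylock(&busy_lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&busy_lock);
 *	}
 */
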
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Fast path: 0 -> _Q_LOCKED_VAL claims an uncontended lock. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Contended; @val now holds the lock word value we observed. */
	queued_spin_lock_slowpath(lock, val);
}
#endif

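/*
 * Illustrative sketch only: the #ifndef above lets an architecture supply its
 * own queued_spin_lock() fast path before including this header, roughly:
 *
 *	#define queued_spin_lock queued_spin_lock
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		int val = 0;
 *
 *		-- typically an arch-optimised acquire primitive here
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 *			return;
 *
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */
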
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

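/*
 * Illustrative sketch only: an architecture that must hook the unlock path
 * (e.g. for paravirtualized spinlocks) can likewise define
 * queued_spin_unlock() before including this header, along the lines of
 * x86's paravirt variant:
 *
 *	#define queued_spin_unlock queued_spin_unlock
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		pv_queued_spin_unlock(lock);	-- paravirt unlock hook
 *	}
 */
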
#ifndef virt_spin_lock
/*
 * Hook for virtualized environments; the generic version is a no-op.
 * An architecture may override this to fall back to a simpler lock when it
 * detects it is running under a hypervisor (see the sketch below).
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

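/*
 * Illustrative sketch only (helper name hypothetical): an architecture that
 * finds itself on a hypervisor without paravirt spinlock support may override
 * virt_spin_lock() with a test-and-set fallback, much like x86 does behind a
 * static key.  Fair FIFO queueing is deliberately avoided there because lock
 * holder/waiter preemption by the hypervisor makes it perform badly:
 *
 *	static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		int val;
 *
 *		if (!running_on_hypervisor())
 *			return false;
 *	retry:
 *		val = atomic_read(&lock->val);
 *		if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
 *			cpu_relax();
 *			goto retry;
 *		}
 *		return true;
 *	}
 */
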
/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

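/*
 * On architectures selecting qspinlock, arch_spinlock_t is a struct qspinlock
 * (see asm-generic/qspinlock_types.h), so the generic spinlock layers end up
 * calling the functions above.  Illustrative use only:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */
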
#endif /* __ASM_GENERIC_QSPINLOCK_H */