/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

/*
 * x86 can spin efficiently on the pending bit, so allow up to 512
 * iterations of waiting for a pending->locked hand-over before falling
 * through to the queueing path (the generic default is 1).
 */
#define _Q_PENDING_LOOPS	(1 << 9)

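/*
 * The generic slowpath consumes _Q_PENDING_LOOPS as a spin budget; the
 * pending-wait in kernel/locking/qspinlock.c is, roughly:
 *
 *	int cnt = _Q_PENDING_LOOPS;
 *	val = atomic_cond_read_relaxed(&lock->val,
 *				       (VAL != _Q_PENDING_VAL) || !cnt--);
 */
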
/*
 * Set the pending bit with a "lock btsl" and build up a return value the
 * generic slowpath can use: the pending bit reflects its pre-RMW state
 * exactly (taken from CF), while the remaining fields are sampled with an
 * ordinary atomic_read() afterwards. The lock prefix gives full ordering,
 * which covers the required acquire semantics.
 */
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val = 0;

	if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			     "I", _Q_PENDING_OFFSET))
		val |= _Q_PENDING_VAL;

	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
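
/*
 * For comparison, the default version of this hook in
 * <asm-generic/qspinlock.h> performs the whole operation as one atomic
 * RMW, roughly:
 *
 *	static __always_inline u32
 *	queued_fetch_set_pending_acquire(struct qspinlock *lock)
 *	{
 *		return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
 *	}
 *
 * The x86 override exists because a single "lock btsl" is cheaper than
 * the cmpxchg loop that atomic_fetch_or_acquire() may expand to.
 */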

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

#define	queued_spin_unlock queued_spin_unlock
/**
 * native_queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
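
/*
 * On x86 (a TSO machine) every store already has release semantics, so
 * the smp_store_release() above reduces to a compiler barrier plus a
 * plain byte store, roughly:
 *
 *	barrier();
 *	WRITE_ONCE(lock->locked, 0);
 *
 * i.e. the native unlock ends up as a single MOV instruction.
 */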

/*
 * Route the slowpath, the unlock and the preemption test through the
 * paravirt ops so that a hypervisor-aware implementation can be patched
 * in at boot.
 */
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
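
/*
 * vcpu_is_preempted() lets spin-wait loops give up early when the CPU
 * they are spinning on is a vCPU that the hypervisor has scheduled out.
 * A hypothetical caller (try_lock(), owner_cpu and do_yield() are
 * illustrative, not part of this header) might look like:
 *
 *	while (!try_lock(lock)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			do_yield();
 *		cpu_relax();
 *	}
 */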

#ifdef CONFIG_PARAVIRT
/*
 * True by default; boot code disables the key on bare metal and on
 * hypervisors with real paravirt spinlock support, leaving it set only
 * where the test-and-set fallback below is wanted.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 *
	 * The loop is a test-and-test-and-set: spin with plain reads
	 * until the lock looks free, and only then attempt the atomic
	 * cmpxchg, so the cache line can stay shared while we wait.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
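
/*
 * Note that virt_spin_lock() is not called directly by lock users: the
 * generic queued_spin_lock_slowpath() in kernel/locking/qspinlock.c
 * tries it first, roughly:
 *
 *	if (virt_spin_lock(lock))
 *		return;
 *
 * so when the static key is on, the whole queueing machinery is
 * bypassed in favour of the unfair test-and-set loop above.
 */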
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>
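
/*
 * The generic header is deliberately included last: it only supplies an
 * operation when this file has not already defined it, keyed off the
 * "#define name name" convention used above, e.g. roughly:
 *
 *	#ifndef queued_spin_unlock
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release(&lock->locked, 0);
 *	}
 *	#endif
 */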

#endif /* _ASM_X86_QSPINLOCK_H */