#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
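
/*
 * Illustrative note (not from the original source): the only effect of
 * UNLOCK_LOCK_PREFIX is on the unlock paths below, which become a locked
 * rather than a plain increment when the workaround is active, e.g. for
 * the 8-bit case:
 *
 *	lock incb slock		// UNLOCK_LOCK_PREFIX == LOCK_PREFIX
 *	incb slock		// UNLOCK_LOCK_PREFIX empty (common case)
 *
 * The locked form is slower but sidesteps the PPro store-ordering
 * errata cited above.
 */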

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of
 * line. (A standalone sketch of this acquire path follows
 * __ticket_spin_lock() below.)
 */
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}
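
/*
 * Illustrative sketch (not part of this header): the same xadd-based
 * acquire, written as a standalone userspace routine against GCC's
 * __atomic builtins. The names (ticket_lock, ticket_lock_acquire,
 * ticket_lock_release) are invented for the example; the kernel's
 * xadd() corresponds to __atomic_fetch_add() on the combined word.
 * Assumes a little-endian layout, matching the "tail in the high part"
 * rule above.
 *
 *	#include <stdint.h>
 *
 *	struct ticket_lock {
 *		union {
 *			uint16_t head_tail;		// tail in high byte
 *			struct { uint8_t head, tail; };	// little-endian
 *		};
 *	};
 *
 *	static void ticket_lock_acquire(struct ticket_lock *lock)
 *	{
 *		// Take a ticket: bump the tail (high byte) and read the
 *		// old head/tail pair back in a single atomic step.
 *		uint16_t old = __atomic_fetch_add(&lock->head_tail,
 *						  1 << 8, __ATOMIC_ACQUIRE);
 *		uint8_t ticket = old >> 8;	// our place in the queue
 *
 *		// Spin until the head catches up with our ticket.
 *		while ((uint8_t)__atomic_load_n(&lock->head_tail,
 *						__ATOMIC_ACQUIRE) != ticket)
 *			__builtin_ia32_pause();	// cpu_relax() equivalent
 *	}
 *
 *	static void ticket_lock_release(struct ticket_lock *lock)
 *	{
 *		// Advance the head; the next waiter's ticket now matches.
 *		__atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
 *	}
 */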

#if (NR_CPUS < 256)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
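
/*
 * Rough C equivalent of the asm above (an illustrative sketch, not a
 * drop-in replacement): take the lock only if it is currently free,
 * i.e. head == tail, by cmpxchg'ing in a copy whose tail is one higher.
 *
 *	old = lock->tickets;			// movzwl
 *	if (old.head != old.tail)		// cmpb / jne
 *		return 0;			// sete yields 0, ZF clear
 *	new = old + one ticket in the tail;	// leal 0x100(...)
 *	return cmpxchg(&lock->slock, old, new) == old;	// lock cmpxchgw
 */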

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned tmp;
	unsigned new;

	/*
	 * Same idea as the NR_CPUS < 256 variant, but with 16-bit head
	 * and tail halves: rotating the copy by 16 lines the tail up
	 * with the head so a single cmpl can compare the two.
	 */
	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}
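
/*
 * Worked example (illustrative, not from the original source): with
 * head == 2 and tail == 2 the lock is free; with head == 2, tail == 3
 * one CPU holds it uncontended (tail - head == 1); with head == 2,
 * tail == 5 it is held with two more CPUs queued, so
 * __ticket_spin_is_contended() returns true. Masking with TICKET_MASK
 * keeps the subtraction correct once the tail has wrapped past zero.
 */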

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
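
/*
 * Worked example (illustrative; assumes the classic 32-bit layout in
 * which RW_LOCK_BIAS is 0x01000000): the counter starts at the bias.
 * Each reader decrements it by 1, so any positive value means further
 * reads can be granted. A writer subtracts the whole bias and needs the
 * result to be exactly 0, i.e. no readers and no other writer:
 *
 *	0x01000000	unlocked
 *	0x00fffffd	three readers in
 *	0x00000000	one writer in
 *
 * Unlock reverses the operation: add 1 for a reader, add the whole
 * bias for the writer.
 */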

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
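
/*
 * Illustrative userspace sketch (invented names, not part of this
 * header) of the same optimistic trylock pattern used above: decrement
 * first, then undo the decrement if the result shows the lock was not
 * available. BIAS stands in for the classic RW_LOCK_BIAS value.
 *
 *	#include <stdint.h>
 *
 *	#define BIAS 0x01000000
 *
 *	static int rw_read_trylock(int32_t *count)
 *	{
 *		// Optimistically take a reader slot.
 *		if (__atomic_sub_fetch(count, 1, __ATOMIC_ACQUIRE) >= 0)
 *			return 1;		// no writer held it
 *		// A writer owns the lock: put the reader slot back.
 *		__atomic_add_fetch(count, 1, __ATOMIC_RELAXED);
 *		return 0;
 *	}
 *
 *	static int rw_write_trylock(int32_t *count)
 *	{
 *		// Claim the whole bias; 0 means no readers, no writer.
 *		if (__atomic_sub_fetch(count, BIAS, __ATOMIC_ACQUIRE) == 0)
 *			return 1;
 *		__atomic_add_fetch(count, BIAS, __ATOMIC_RELAXED);
 *		return 0;
 *	}
 */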

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */