#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of
 * line.
 */
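
/*
 * As a rough, illustrative C sketch of the scheme described above (not the
 * actual implementation, which is the asm below; fetch_and_add() is a
 * hypothetical atomic helper and the 8-bit head/tail layout is assumed):
 *
 *	struct ticket_lock { u8 head; u8 tail; };	// low byte / high byte
 *
 *	void ticket_lock(struct ticket_lock *lock)
 *	{
 *		// atomically take a ticket: the old tail is our place in line
 *		u8 me = fetch_and_add(&lock->tail, 1);
 *
 *		// spin until the "now serving" counter reaches our ticket
 *		while (ACCESS_ONCE(lock->head) != me)
 *			cpu_relax();
 *	}
 *
 *	void ticket_unlock(struct ticket_lock *lock)
 *	{
 *		lock->head++;	// hand the lock to the next waiter in line
 *	}
 */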
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* the lock is held whenever the head and tail tickets differ */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* contended when more than one ticket is outstanding (tail - head > 1) */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
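
/*
 * Worked example of the arithmetic above (illustrative only, with
 * TICKET_SHIFT == 8): if slock == 0x0503, the tail ticket is 0x05 and the
 * head is 0x03, so __ticket_spin_is_locked computes effectively
 * (0x05 ^ 0x03) & 0xff != 0 (the lock is held), and
 * __ticket_spin_is_contended computes effectively (0x05 - 0x03) & 0xff == 2,
 * which is > 1 (at least one other CPU is queued behind the owner).
 */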

#ifdef CONFIG_PARAVIRT
/*
 * Define a virtualization-friendly, old-style byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};
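
/*
 * Illustrative C sketch of the lock loop implemented by the asm below (not
 * the actual implementation; xchg() here is a hypothetical atomic exchange
 * helper, and the spinners updates are atomic in the real code):
 *
 *	void byte_lock(struct __byte_spinlock *bl)
 *	{
 *		while (xchg(&bl->lock, 1) != 0) {	// try to grab the lock byte
 *			bl->spinners++;			// register as a waiter
 *			while (ACCESS_ONCE(bl->lock) == 1)
 *				cpu_relax();		// wait for it to drop
 *			bl->spinners--;			// stop waiting, retry xchg
 *		}
 *	}
 */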

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"			/* try to grab the lock byte */
	    "   test %1,%1\n"
	    "   jz 3f\n"			/* old value was 0: we got it */
	    "   " LOCK_PREFIX "incb %2\n"	/* register as a spinner */
	    "2: rep;nop\n"			/* pause while the lock is held */
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"	/* stop spinning, retry the xchg */
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
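
/*
 * Illustrative sketch of the counter scheme (not the actual implementation,
 * which is the asm and atomic helpers below): the counter starts at
 * RW_LOCK_BIAS; each reader subtracts 1 and a writer subtracts the whole
 * bias, so roughly:
 *
 *	read_lock:    count -= 1;            // ok as long as result stays >= 0
 *	read_unlock:  count += 1;
 *	write_lock:   count -= RW_LOCK_BIAS; // ok only if the result is 0
 *	write_unlock: count += RW_LOCK_BIAS;
 *
 * A negative count (sign bit set) therefore means a writer is involved, and
 * the slow paths (__read_lock_failed/__write_lock_failed) must wait.
 */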

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* optimistically take a reader slot; back out if a writer got in */
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* succeeds only if we took the whole bias, i.e. no readers or writer */
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */