xref: /openbmc/linux/arch/arm64/include/asm/spinlock.h (revision 6aa7de05)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
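 *
 * This is a ticket lock: the lock word (see asm/spinlock_types.h) holds an
 * "owner" halfword and a "next" halfword, TICKET_SHIFT bits apart. A CPU
 * takes a ticket by atomically incrementing "next" and owns the lock once
 * "owner" catches up with the ticket it was handed; unlocking advances
 * "owner" by one.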
 */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
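	/*
	 * lockval holds the pre-increment value, so its "next" half is the
	 * ticket we were handed. Rotating by 16 bits and XORing compares
	 * "next" with "owner": a zero result means the lock was free and is
	 * now ours.
	 */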
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
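	/*
	 * lockval still holds the pre-increment value, so lockval >> 16 is
	 * our ticket. Re-read the owner halfword (load-acquire exclusive, so
	 * the unlocker's store clears the monitor and wakes the wfe above)
	 * until it matches.
	 */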
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

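/*
 * Take the lock only if it is currently free (owner == next); do not spin
 * waiting for the owner. Returns non-zero on success.
 */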
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
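	/*
	 * casa leaves the value it observed in %w0; the two instructions
	 * below clear %w1 exactly when the compare-and-swap took the lock,
	 * which is what the !tmp return reports.
	 */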
	"	and	%w1, %w1, #0xffff\n"
	"	eor	%w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

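/*
 * Release the lock by bumping the owner halfword with release semantics,
 * serving the next ticket. The store also wakes waiters parked in wfe by
 * clearing their exclusive monitor.
 */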
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

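/* The lock is free when the next ticket to hand out is the one being served. */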
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb(); /* ^^^ */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

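/*
 * More than one outstanding ticket means at least one CPU is queued behind
 * the current owner.
 */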
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __ASM_SPINLOCK_H */