xref: /openbmc/linux/arch/mips/include/asm/spinlock.h (revision 25da4e9d)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  */
9 #ifndef _ASM_SPINLOCK_H
10 #define _ASM_SPINLOCK_H
11 
12 #include <linux/compiler.h>
13 
14 #include <asm/barrier.h>
15 #include <asm/processor.h>
16 #include <asm/qrwlock.h>
17 #include <asm/compiler.h>
18 #include <asm/war.h>
19 
20 /*
21  * Your basic SMP spinlocks, allowing only a single CPU anywhere
22  *
23  * Simple spin lock operations.	 There are two variants, one clears IRQ's
24  * on the local processor, one does not.
25  *
26  * These are fair FIFO ticket locks
27  *
28  * (the type definitions are in asm/spinlock_types.h)
29  */
30 
31 
32 /*
33  * Ticket locks are conceptually two parts, one indicating the current head of
34  * the queue, and the other indicating the current tail. The lock is acquired
35  * by atomically noting the tail and incrementing it by one (thus adding
36  * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
38  */
39 
40 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
41 {
42 	u32 counters = ACCESS_ONCE(lock->lock);
43 
44 	return ((counters >> 16) ^ counters) & 0xffff;
45 }
46 
47 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
48 {
49 	return lock.h.serving_now == lock.h.ticket;
50 }
51 
52 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
53 
/*
 * Busy-wait until the current holder of the lock releases it.
 *
 * Snapshot the ticket currently being served (the present owner), then
 * spin until either the lock is completely free (serving_now == ticket)
 * or serving_now has moved past the snapshotted owner — in both cases
 * the holder we observed has dropped the lock.  Waiting only for the
 * observed owner, rather than for the lock to go fully idle, avoids
 * starving the waiter under sustained contention.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->h.serving_now);
	smp_rmb();	/* order the owner snapshot before the re-reads below */
	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		/* Lock free, or a later ticket is now being served? */
		if (tmp.h.serving_now == tmp.h.ticket ||
		    tmp.h.serving_now != owner)
			break;

		cpu_relax();
	}
	/* Upgrade the control dependency of the exit test to ACQUIRE. */
	smp_acquire__after_ctrl_dep();
}
69 
70 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
71 {
72 	u32 counters = ACCESS_ONCE(lock->lock);
73 
74 	return (((counters >> 16) - counters) & 0xffff) > 1;
75 }
76 #define arch_spin_is_contended	arch_spin_is_contended
77 
/*
 * Acquire the ticket lock.
 *
 * Atomically (LL/SC) grab the next ticket by adding 1 to the high half
 * of the lock word, then compare our ticket against serving_now (the low
 * half).  If they already match we own the lock and fall through; if not
 * we branch to the out-of-line slow path in .subsection 2, which uses
 * the distance between our ticket and serving_now, scaled by 32
 * (sll ... 5), as a proportional back-off delay before re-reading
 * serving_now and retrying.
 *
 * The R10000_LLSC_WAR variant retries the SC with beqzl
 * (branch-likely) to work around the R10000 LL/SC erratum; the normal
 * variant uses a plain beqz with the srl hoisted into the delay slot.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;	/* +1 in the ticket (upper 16-bit) field */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:	.insn						\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();	/* full acquire ordering after a successful SC */
}
155 
156 static inline void arch_spin_unlock(arch_spinlock_t *lock)
157 {
158 	unsigned int serving_now = lock->h.serving_now + 1;
159 	wmb();
160 	lock->h.serving_now = (u16)serving_now;
161 	nudge_writes();
162 }
163 
/*
 * Attempt to acquire the ticket lock without spinning.
 *
 * Load the lock word (LL), and only if the ticket half already equals
 * serving_now — i.e. the lock is free — try to take the next ticket with
 * SC.  Returns 1 on success, 0 if the lock was busy (branch to 3:).
 * As in arch_spin_lock(), the R10000_LLSC_WAR variant retries the SC
 * with beqzl (branch-likely) for the R10000 LL/SC erratum.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* +1 in the ticket (upper 16-bit) field */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:	.insn						\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();	/* full acquire ordering after a successful SC */

	return tmp;	/* 1 = lock taken, 0 = was busy */
}
223 
/* IRQ-flags variants gain nothing on MIPS; use the plain lock ops. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Back off politely while spinning on a contended lock. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
230 
231 #endif /* _ASM_SPINLOCK_H */
232