/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */
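
/*
 * Lock word layout, as implied by the masks and shifts used below (the
 * raw_spinlock_t definition itself lives in asm/spinlock_types.h):
 *
 *	bits 31..27	unused
 *	bits 26..14	ticket counter (tail of the queue)
 *	bit  13		spacer, kept zero in the stored word
 *	bits 12..0	now-serving counter (head of the queue)
 *
 * Taking a ticket adds 0x4000 (1 << 14) to the word.  Unlocking adds 1
 * to the head and then clears bit 13 (the ori/xori 0x2000 pair below),
 * so a head that overflows 0x1fff wraps within its own field instead of
 * carrying into the ticket counter; both counters are free-running
 * modulo 8192.
 */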

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 14) ^ counters) & 0x1fff;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	while (__raw_spin_is_locked(x)) { cpu_relax(); }

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 14) - counters) & 0x1fff) > 1;
}
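
/*
 * Worked example (illustrative only): after three CPUs have taken
 * tickets and one has been served, the word holds tail = 3 and
 * head = 1.  __raw_spin_is_locked() sees (3 ^ 1) & 0x1fff != 0, i.e.
 * held, and __raw_spin_is_contended() sees ((3 - 1) & 0x1fff) > 1,
 * i.e. at least one CPU is queued behind the current holder.
 */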

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int my_ticket;
	int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 3f			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"							\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	}

	smp_llsc_mb();
}
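
/*
 * Conceptually, __raw_spin_lock() above implements the following
 * (illustrative C only -- the real code must take the ticket with a
 * single ll/sc sequence, and delay(n) stands for the busy loop at
 * labels 5/6):
 *
 *	my_ticket = (fetch_and_add(&lock->lock, 1 << 14) >> 14) & 0x1fff;
 *	while ((head = lock->lock & 0x1fff) != my_ticket)
 *		delay(((my_ticket - head) & 0x1fff) << 5);
 *
 * The shift by 5 scales the backoff to the number of waiters ahead of
 * us, so CPUs further back in the queue re-read the lock word less
 * often.
 */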

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"				# __raw_spin_unlock	\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_unlock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 2f				\n"
		"	 nop						\n"
		"							\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	}
}
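
/*
 * Note the barrier placement in __raw_spin_unlock(): smp_llsc_mb()
 * runs before the head is advanced, giving the store release
 * semantics, while the ori/xori 0x2000 pair keeps a wrapping head
 * from carrying into the ticket counter (see the lock word layout
 * comment above).
 */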

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 4f				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"4:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	}

	smp_llsc_mb();

	return tmp;
}
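
/*
 * In C terms (illustrative only), __raw_spin_trylock() succeeds iff
 * nobody is queued, i.e. head == tail, in which case it takes a
 * ticket and returns 1; otherwise it returns 0 without touching the
 * word:
 *
 *	if ((((w = lock->lock) >> 14) & 0x1fff) != (w & 0x1fff))
 *		return 0;
 *	lock->lock = w + (1 << 14);	<- done atomically via ll/sc
 *	return 1;
 */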

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
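
/*
 * Encoding, as implied by the code below: rw->lock holds the number of
 * readers currently inside the critical section, and a writer sets the
 * sign bit (lui 0x8000, i.e. 0x80000000).  A negative word therefore
 * means write-locked, zero means free, and positive means read-locked.
 */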

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu: sub makes the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is
   already unlocked or is being held by a writer.  */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
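
/*
 * A plain sw of zero is enough in __raw_write_unlock(): the writer
 * owns the whole word, so no ll/sc sequence is needed, and the
 * smp_mb() above orders the critical section before the releasing
 * store.
 */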

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
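
/*
 * Note that in __raw_read_trylock() the __WEAK_LLSC_MB sits on the
 * success path inside the asm: a successful trylock gets acquire
 * semantics, while the failure path branches straight to label 2 and
 * takes no barrier at all.
 */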

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}


#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */