xref: /openbmc/linux/arch/sh/include/asm/spinlock.h (revision f15cbe6f)
/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

/*
 * The only locking implemented here uses SH-4A opcodes. For others,
 * split this out as per atomic-*.h.
 *
 * movli.l/movco.l are SH-4A's load-linked/store-conditional pair;
 * every primitive below is built on them, so refuse to build for
 * CPUs that lack them.
 */
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif
21f15cbe6fSPaul Mundt 
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Lock word protocol (see __raw_spin_lock()/__raw_spin_unlock()
 * below): 1 means free, 0 means held.
 */

/* Held when the word is zero; <= 0 also catches a corrupted negative value. */
#define __raw_spin_is_locked(x)		((x)->lock <= 0)
/* No IRQ state is encoded in the lock word, so the flags variant is plain. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/*
 * Busy-wait until the current holder releases the lock.
 *
 * The previous condition -- do { cpu_relax(); } while ((x)->lock) --
 * was inverted: a *free* lock has a nonzero word here, so it spun
 * while the lock was free and returned immediately while it was
 * held.  Wait while it is locked instead.
 */
#define __raw_spin_unlock_wait(x) \
	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
30f15cbe6fSPaul Mundt 
/*
 * Simple spin lock operations.  There are two variants, one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
/*
 * Acquire @lock, spinning until it is taken.
 *
 * LL/SC sequence: load the word, unconditionally store 0 (held),
 * then inspect the value we read.  The SC store (movco.l) clears T
 * on failure, forcing a retry; cmp/pl clears T when the old value
 * was not positive (lock already held by someone else), which also
 * loops back and spins.  The "z" constraints pin %0 to r0, which is
 * what movli.l/movco.l operate on.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_spin_lock	\n\t" /* LL: tmp = lock word */
		"mov		%0, %1				\n\t" /* remember the old value */
		"mov		#0, %0				\n\t" /* 0 == held */
		"movco.l	%0, @%2				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		"cmp/pl		%1				\n\t" /* was it free (old > 0)? */
		"bf		1b				\n\t" /* no -> keep spinning */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
56f15cbe6fSPaul Mundt 
/*
 * Release @lock by storing 1 (free) back into the lock word with a
 * plain store; spinners in __raw_spin_lock() pick the new value up
 * through their LL/SC retry loop.
 *
 * NOTE(review): unlike the trylock paths there is no synco here --
 * presumably the "memory" clobber plus SH-4A store ordering is
 * considered sufficient for release semantics; confirm against the
 * CPU manual.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! __raw_spin_unlock	\n\t" /* 1 == free */
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
69f15cbe6fSPaul Mundt 
/*
 * Try once to take @lock.
 *
 * Same LL/SC sequence as __raw_spin_lock(), but a contended lock is
 * not spun on: the "bf 1b" retry only covers a failed movco.l store.
 * Returns the value that was read -- nonzero (1: the lock was free
 * and is now ours) on success, 0 on failure.  The trailing synco is
 * the SH-4A synchronization barrier, ordering the acquire against
 * later accesses.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_spin_trylock	\n\t" /* LL: tmp = lock word */
		"mov		%0, %1				\n\t" /* remember the old value */
		"mov		#0, %0				\n\t" /* 0 == held */
		"movco.l	%0, @%2				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		"synco						\n\t" /* acquire barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}
89f15cbe6fSPaul Mundt 
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * The rwlock word counts available reader slots; any positive count
 * means a reader can still get in.
 */
#define __raw_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the full, untouched RW_LOCK_BIAS count -- no
 * readers and no writer present.
 */
#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
/*
 * Take @rw for reading.
 *
 * Each reader claims a slot by decrementing the count.  Spin (by
 * looping back to the LL at 1:) while the count is not positive --
 * i.e. while a writer, having subtracted RW_LOCK_BIAS, holds the
 * lock -- otherwise decrement and attempt the SC store, retrying if
 * the store fails.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_read_lock	\n\t" /* LL: tmp = count */
		"cmp/pl		%0				\n\t" /* any slots free? */
		"bf		1b				\n\t" /* no -> spin */
		"add		#-1, %0				\n\t" /* claim one slot */
		"movco.l	%0, @%1				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
128f15cbe6fSPaul Mundt 
/*
 * Drop a read hold on @rw: atomically give the slot back by
 * incrementing the count in an LL/SC loop, retrying until the
 * movco.l store succeeds.
 */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_read_unlock	\n\t" /* LL: tmp = count */
		"add		#1, %0				\n\t" /* return our slot */
		"movco.l	%0, @%1				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
144f15cbe6fSPaul Mundt 
/*
 * Take @rw for writing.
 *
 * A writer needs the whole word: spin until the count is back at its
 * full RW_LOCK_BIAS value (cmp/hs: unsigned count >= bias, meaning
 * no readers and no writer), then subtract the entire bias so the
 * word drops to 0, excluding both readers and other writers.
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_write_lock	\n\t" /* LL: tmp = count */
		"cmp/hs		%2, %0				\n\t" /* count >= bias? */
		"bf		1b				\n\t" /* no -> spin */
		"sub		%2, %0				\n\t" /* take the whole bias */
		"movco.l	%0, @%1				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
162f15cbe6fSPaul Mundt 
/*
 * Drop the write hold on @rw by restoring the full RW_LOCK_BIAS
 * count with a plain store.  The writer held the word exclusively
 * (readers spin before ever attempting a store while the count is
 * 0), so no LL/SC loop is needed on this side.
 */
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t" /* count = RW_LOCK_BIAS */
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
172f15cbe6fSPaul Mundt 
/*
 * Try once to take @rw for reading.
 *
 * Read the count; if it is not positive (a writer holds the lock),
 * skip the store entirely by branching to 2: and report failure.
 * Otherwise decrement and SC-store, retrying only when the store
 * itself fails.  Returns 1 when the pre-decrement count was
 * positive (slot claimed), 0 otherwise.  synco orders the acquire.
 */
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_read_trylock	\n\t" /* LL: tmp = count */
		"mov		%0, %1				\n\t" /* remember the old count */
		"cmp/pl		%0				\n\t" /* any slots free? */
		"bf		2f				\n\t" /* no -> fail, skip store */
		"add		#-1, %0				\n\t" /* claim one slot */
		"movco.l	%0, @%2				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		"2:						\n\t"
		"synco						\n\t" /* acquire barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}
195f15cbe6fSPaul Mundt 
/*
 * Try once to take @rw for writing.
 *
 * If the count holds its full RW_LOCK_BIAS value (no readers, no
 * writer), subtract the whole bias; otherwise jump to 2: and store
 * the value back *unchanged* -- the movco.l runs on both paths,
 * which is harmless on failure and completes the LL/SC pair.  The
 * "bf 1b" retries only a failed store.  Returns 1 when the old
 * count was >= RW_LOCK_BIAS (lock acquired), 0 otherwise.  synco
 * orders the acquire.
 */
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_write_trylock	\n\t" /* LL: tmp = count */
		"mov		%0, %1				\n\t" /* remember the old count */
		"cmp/hs		%3, %0				\n\t" /* count >= bias? */
		"bf		2f				\n\t" /* no -> store unchanged */
		"sub		%3, %0				\n\t" /* take the whole bias */
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t" /* SC: attempt the store */
		"bf		1b				\n\t" /* store failed -> retry */
		"synco						\n\t" /* acquire barrier */
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}
218f15cbe6fSPaul Mundt 
/* Backoff hooks invoked while a CPU spins; a plain cpu_relax() here. */
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */
224