/*
 * arch/sh/include/asm/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

/*
 * The only locking implemented here uses SH-4A opcodes. For others,
 * split this out as per atomic-*.h.
 */
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif
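
/*
 * All of the primitives below are built on the SH-4A LL/SC pair:
 * movli.l loads the lock word into R0 and sets a reservation, and
 * movco.l writes R0 back only if that reservation is still intact,
 * setting the T bit on success.  The "bf 1b" that follows each
 * movco.l retries the whole sequence when the store-conditional
 * fails.  The "z" constraint pins the temporary to R0, which both
 * opcodes require.
 */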

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

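/*
 * Spin until the lock is observed free; smp_cond_load_acquire()
 * supplies the acquire ordering for the accesses that follow.
 */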
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, VAL > 0);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
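/*
 * Acquire: load-link the lock word, remember the old value, and try to
 * store 0 (locked).  Retry if the store-conditional fails, or if the old
 * value was not positive, i.e. another CPU already held the lock.
 */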
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

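/*
 * Release: a plain store of 1 marks the lock free; the "memory" clobber
 * keeps the compiler from moving critical-section accesses past it.
 */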
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

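/*
 * Trylock: a single attempt to store 0, repeated only when the LL/SC
 * reservation is lost.  The old lock value is returned, so non-zero
 * means the lock was free and is now ours; synco is the SH-4A barrier
 * that orders the critical section after the acquire.
 */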
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

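/*
 * The lock word is a counter that starts at RW_LOCK_BIAS.  Each reader
 * decrements it by one and a writer subtracts the whole bias, so a
 * positive value means readers may still enter, and the full bias means
 * the lock is completely free.  Callers use the generic API, e.g. a
 * writer takes write_lock_irqsave() while readers running in interrupt
 * context can simply use read_lock().
 */
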
/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

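/*
 * Reader acquire: wait until the counter is positive (no writer holds
 * the lock), then atomically decrement it to take a read reference.
 */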
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

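/*
 * Reader release: atomically increment the counter to drop the read
 * reference taken in arch_read_lock().
 */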
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

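/*
 * Writer acquire: wait until the counter is back at RW_LOCK_BIAS
 * (no readers, no writer), then subtract the whole bias so the
 * counter reads zero while the write lock is held.
 */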
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

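/*
 * Writer release: restore the counter to RW_LOCK_BIAS with a plain
 * store.
 */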
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

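/*
 * Reader trylock: decrement the counter only if it is currently
 * positive, retrying just when the LL/SC reservation is lost.
 * Returns non-zero if the read lock was taken.
 */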
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

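/*
 * Writer trylock: subtract the bias only when the counter is still at
 * RW_LOCK_BIAS; otherwise the unchanged value is written back so the
 * LL/SC pair completes.  Returns non-zero if the write lock was taken.
 */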
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */