xref: /openbmc/linux/arch/arc/include/asm/spinlock.h (revision ff148d8a)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed).
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers, thus
	 * we need the full all-all barrier here. See the usage sketch after
	 * arch_spin_unlock() below for how the pairing is meant to be used.
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
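
/*
 * Illustrative sketch only (not built): honouring the "1 - lock taken
 * successfully" convention of arch_spin_trylock(). The function name and
 * its caller-side policy are hypothetical.
 */
#if 0
static int demo_try_update(arch_spinlock_t *lock, unsigned int *shared)
{
	if (!arch_spin_trylock(lock))
		return 0;		/* contended: caller backs off/retries */

	(*shared)++;			/* we own the lock: critical section */
	arch_spin_unlock(lock);		/* defined just below */
	return 1;
}
#endif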

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}

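/*
 * Illustrative sketch only (not built): the ACQUIRE/RELEASE pairing the
 * barrier comments above describe. Names are hypothetical; the point is
 * that the update to demo_shared cannot be observed outside the window in
 * which demo_lock is held.
 */
#if 0
static arch_spinlock_t demo_lock = { .slock = __ARCH_SPIN_LOCK_UNLOCKED__ };
static unsigned int demo_shared;

static void demo_update(void)
{
	arch_spin_lock(&demo_lock);	/* smp_mb() inside acts as ACQUIRE */
	demo_shared++;			/* stays inside the critical section */
	arch_spin_unlock(&demo_lock);	/* smp_mb() inside acts as RELEASE */
}
#endif
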
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers could be starved indefinitely by reader(s);
 * see the usage sketch after arch_write_unlock() below.
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * A count of zero means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
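
/*
 * Sketch only (not built): the reader-acquire loop above expressed with the
 * GCC __atomic compare-exchange builtin instead of llock/scond, purely for
 * readability. A counter of 0 means "write locked"; any other value is the
 * number of reader slots still available.
 */
#if 0
static void demo_read_lock_equiv(arch_rwlock_t *rw)
{
	unsigned int old, newval;

	for (;;) {
		old = rw->counter;
		if (old == 0)		/* writer holds it: keep spinning */
			continue;
		newval = old - 1;	/* take one reader slot */
		if (__atomic_compare_exchange_n(&rw->counter, &old, newval,
						true /* weak */,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;		/* got it; on failure, retry */
	}

	smp_mb();			/* ACQUIRE, as in arch_read_lock() */
}
#endif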

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
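
/*
 * Sketch only (not built): the writer-acquire loop above as a plain-C
 * compare-exchange, for readability. The writer may only take the lock when
 * the counter is exactly __ARCH_RW_LOCK_UNLOCKED__ (no readers, no writer),
 * and marks it write-locked by storing 0.
 */
#if 0
static void demo_write_lock_equiv(arch_rwlock_t *rw)
{
	unsigned int expected;

	for (;;) {
		expected = __ARCH_RW_LOCK_UNLOCKED__;
		if (__atomic_compare_exchange_n(&rw->counter, &expected,
						0 /* WR_LOCKED */,
						true /* weak */,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;
		/* readers (or another writer) hold it: spin and retry */
	}

	smp_mb();			/* ACQUIRE, as in arch_write_lock() */
}
#endif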

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
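
/*
 * Illustrative sketch only (not built): typical reader and writer sides of
 * the API above. Multiple CPUs may sit in demo_reader() concurrently; the
 * writer gets in only when no reader holds the lock and, being unfair as
 * noted above, may be starved if readers keep arriving. Names are made up.
 */
#if 0
static void demo_reader(arch_rwlock_t *rw, const unsigned int *shared)
{
	unsigned int snapshot;

	arch_read_lock(rw);		/* shared with other readers */
	snapshot = *shared;
	arch_read_unlock(rw);
	(void)snapshot;
}

static void demo_writer(arch_rwlock_t *rw, unsigned int *shared)
{
	arch_write_lock(rw);		/* exclusive */
	(*shared)++;
	arch_write_unlock(rw);
}
#endif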

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
	 * for ACQ and REL semantics respectively. However, EX-based spinlocks
	 * need the extra smp_mb() to work around a hardware quirk.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	smp_mb();
}
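
/*
 * Sketch only (not built): the EX-based loop above is a classic test-and-set
 * spinlock. EX atomically swaps a register with memory, so the loop keeps
 * writing LOCKED and re-checking what was there before; expressed here with
 * the GCC __atomic exchange builtin purely for readability.
 */
#if 0
static void demo_ex_lock_equiv(arch_spinlock_t *lock)
{
	unsigned int prev;

	do {
		prev = __atomic_exchange_n(&lock->slock,
					   __ARCH_SPIN_LOCK_LOCKED__,
					   __ATOMIC_RELAXED);
	} while (prev == __ARCH_SPIN_LOCK_LOCKED__);	/* was already held */

	smp_mb();			/* as in arch_spin_lock() above */
}
#endif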

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option.
	 */
	smp_mb();

	/*
	 * EX is not really required here; a plain store of 0 would suffice.
	 * However that causes tasklist livelocks in SystemC based SMP virtual
	 * platforms, where the SystemC core scheduler uses EX as a cue for
	 * moving to the next core. Do a git log of this file for details.
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * see pairing version/comment in arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers could be starved indefinitely by reader(s).
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A count of zero means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#endif /* __ASM_SPINLOCK_H */