/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

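/* Plain read of the lock word; no ordering or barrier is implied */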
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

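/*
 * Acquire with LLOCK/SCOND: spin while the lock word reads LOCKED, then
 * attempt to store LOCKED; SCOND fails (and we retry) if another core
 * wrote the lock word in between.
 */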
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

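/*
 * With LLSC a plain store is enough to release the lock: any contending
 * SCOND simply fails and retries. The smp_mb() before the store provides
 * the RELEASE ordering.
 */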
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 */
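/*
 * rw->counter starts out at __ARCH_RW_LOCK_UNLOCKED__ (the maximum number
 * of concurrent readers): each reader decrements it, a writer drops it to
 * 0, so 0 means write-locked and (__ARCH_RW_LOCK_UNLOCKED__ - counter) is
 * the number of active readers.
 */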

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means a writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to the first/subsequent reader
	 *
	 * 	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (rw->counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

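/*
 * No LLOCK/SCOND on this core: fall back to the atomic EX (exchange)
 * instruction for the spinlock, and build the rwlock out of a plain
 * counter protected by a spinlock (lock_mutex).
 */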
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous: we only need the one
	 * after the lock to provide the ACQUIRE semantics. However, doing
	 * the "right" thing was regressing hackbench, so keep this pending
	 * further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus we need the full all-all barrier here.
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
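/*
 * EX atomically swaps val (preset to LOCKED) with the lock word: reading
 * back UNLOCKED means we now own the lock; reading back LOCKED means it
 * was already held and the lock word is unchanged.
 */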
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here; a simple store of 0 suffices.
	 * However, that causes tasklist livelocks on SystemC-based SMP virtual
	 * platforms, where the SystemC core scheduler uses EX as a cue for
	 * moving to the next core. See the git log of this file for details.
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 *
 * The rwlock state itself is kept in @counter and access to it is
 * serialized with @lock_mutex.
 */
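/*
 * IRQs are disabled around lock_mutex so that an interrupt handler taking
 * the same rwlock on this CPU can't spin forever on a lock_mutex already
 * held by the interrupted context.
 */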

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means a writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to the first/subsequent reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (rw->counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#endif /* __ASM_SPINLOCK_H */