xref: /openbmc/linux/arch/arc/include/asm/spinlock.h (revision a36954f5)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

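/*
 * Wait (with ACQUIRE semantics) until the lock is observed to be free;
 * does not attempt to take it.
 */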
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#ifdef CONFIG_ARC_HAS_LLSC

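/*
 * LLOCK/SCOND (load-locked/store-conditional) based lock; roughly:
 *
 *	while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *		;
 *	lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
 *
 * with the SCOND failing (and the whole sequence retrying) if another
 * CPU wrote the lock word in between.
 */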
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

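/*
 * Single attempt to take the lock; roughly:
 *
 *	if (lock->slock != __ARCH_SPIN_LOCK_LOCKED__) {
 *		lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
 *		got_it = 1;
 *	}
 *
 * Only an SCOND collision causes a retry; an already held lock bails out.
 */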
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

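/*
 * Unlock is a plain store of UNLOCKED, bracketed by full barriers to give
 * RELEASE ordering (ARCv2 has no lighter-weight release barrier).
 */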
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 */
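
/*
 * The rwlock state is a single counter:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		held by readers
 *							(each reader decrements it)
 *	counter == 0					held by a writer
 */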

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * A count of zero means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader:
	 *
	 * 	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers):
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

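/*
 * Write unlock is a plain store of __ARCH_RW_LOCK_UNLOCKED__, bracketed
 * by full barriers for RELEASE ordering.
 */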
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

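/*
 * EX (atomic exchange) based lock; roughly:
 *
 *	do {
 *		val = xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 *
 * i.e. keep swapping LOCKED into the lock word until the value swapped
 * out is seen to be UNLOCKED.
 */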
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; only the one after the
	 * lock is needed to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench, so this
	 * one is kept, pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed):
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here.
	 */
	smp_mb();
}

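/*
 * Single EX: swap LOCKED into the lock word unconditionally; the lock was
 * taken iff the value swapped out was UNLOCKED.
 */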
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

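/*
 * Release: swap UNLOCKED back into the lock word; the old value swapped
 * out is simply discarded.
 */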
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the barrier instructions available on ARCv2,
	 * a full barrier is the only option.
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * Superfluous, but kept for now; see the pairing barrier in
	 * arch_spin_lock() above.
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 *
 * The rwlock state itself is held in @counter and access to it is
 * serialized with the @lock_mutex spinlock.
 */
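
/*
 * Each operation below takes @lock_mutex with interrupts disabled, updates
 * or tests @counter, then releases the mutex again.
 */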

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A count of zero means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

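/* The lock variants simply spin on the trylock variants above */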
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

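/*
 * Unsynchronized snapshots of whether a read/write lock could currently
 * be taken; the answer may be stale by the time it is used.
 */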
#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */