xref: /openbmc/linux/arch/arc/include/asm/spinlock.h (revision 2c684d89)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
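
/*
 * Note: the open-coded arch_spin_unlock_wait() above merely busy-waits
 * until the lock is observed free; it adds no barriers of its own.
 */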

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
 */
#ifndef CONFIG_ARC_STAR_9000923308

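/*
 * Roughly (a sketch only; "store_conditional" below is illustrative,
 * LLOCK/SCOND have no exact C equivalent):
 *
 *	retry:
 *		val = lock->slock;			// LLOCK
 *		if (val == __ARCH_SPIN_LOCK_LOCKED__)
 *			goto retry;			// spin while LOCKED
 *		if (!store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
 *			goto retry;			// lost reservation, retry
 */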
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

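/*
 * Same LLOCK/SCOND sequence as arch_spin_lock() above, except that an
 * already LOCKED value bails out immediately instead of spinning.
 */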
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

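/*
 * Encoding of @counter: it starts at __ARCH_RW_LOCK_UNLOCKED__ (max
 * readers); each reader decrements it, a writer drives it to 0.
 */
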
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader
	 *
	 * 	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

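/*
 * Same as arch_read_lock() above, except that a write-locked counter
 * bails out (got_it stays 0) instead of spinning.
 */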
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

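/*
 * Same as arch_write_lock() above, except that a non-UNLOCKED counter
 * bails out (got_it stays 0) instead of spinning.
 */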
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* CONFIG_ARC_STAR_9000923308 */

/*
 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
 * coherency transactions in the SCU. The exclusive line state keeps rotating
 * among contending cores, leading to a never-ending cycle. So break the cycle
 * by deferring the retry of the failed exclusive access (SCOND). The actual
 * delay needed is a function of the number of contending cores as well as the
 * unrelated coherency traffic from other cores. To keep the code simple, start
 * off with a small delay of 1, which suffices in most cases, and double the
 * delay on contention. Eventually the delay is sufficient for the coherency
 * pipeline to drain, and a subsequent exclusive access will succeed.
 */

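/*
 * Expansion sketch (C-ish, for illustration only) of the fail path that
 * the macros below append to each lock loop (label 4 is the common exit):
 *
 *	tmp = delay;
 *	while (tmp--)
 *		;			// spin for 'delay' iterations
 *	delay <<= 1;			// exponential backoff ("rol" == *2 here)
 *	goto retry;			// i.e. branch back to label 1
 */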
#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"   ; --- scond fail delay ---		\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"					\n"				\
	"4: ; --- done ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bz	4f			\n"	/* done */
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val)
	  SCOND_FAIL_RETRY_VARS
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bz.d	4f			\n"
	"	mov.z	%[got_it], 1		\n"	/* got it */
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	/*
	 * zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader
	 *
	 * 	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bz	4f			\n"	/* done */
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bz.d	4f			\n"
	"	mov.z	%[got_it], 1		\n"	/* got it */
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bz	4f			\n"
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	smp_mb();

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bz.d	4f			\n"
	"	mov.z	%[got_it], 1		\n"	/* got it */
	"					\n"
	SCOND_FAIL_RETRY_ASM

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	scond	%[UNLOCKED], [%[rwlock]]\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
	: "memory", "cc");

	smp_mb();
}

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#endif	/* CONFIG_ARC_STAR_9000923308 */

#else	/* !CONFIG_ARC_HAS_LLSC */

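/*
 * No LLOCK/SCOND: fall back on the atomic EX (exchange) instruction.
 * arch_spin_lock() below keeps swapping LOCKED into the lock word until
 * the value it gets back is UNLOCKED, i.e. until it was the one to take
 * a free lock.
 */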
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so keep this pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus we need the full all-all barrier.
	 */
	smp_mb();
}

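/*
 * Unconditionally swap LOCKED into the lock word; we got the lock iff
 * the old value read back is UNLOCKED.
 */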
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping it for now - see the pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

#endif

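/*
 * counter > 0 means reader slots remain; counter == __ARCH_RW_LOCK_UNLOCKED__
 * means neither readers nor a writer currently hold the lock.
 */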
#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */