/* arch/arc/include/asm/spinlock.h */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}
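
/*
 * Roughly speaking, the smp_cond_load_acquire() above amounts to the
 * following spin-and-acquire sequence (an illustrative sketch, not the
 * actual generic implementation):
 *
 *	while (READ_ONCE(lock->slock))	// !VAL: wait for slock to hit 0
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// promote the final read to ACQUIRE
 */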

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}
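
/*
 * For reference, the llock/scond sequence above is conceptually the retry
 * loop below (illustrative pseudo-C only; LLOCK()/SCOND() are hypothetical
 * stand-ins for the load-locked/store-conditional instructions):
 *
 *	do {
 *		do {
 *			val = LLOCK(&lock->slock);
 *		} while (val == __ARCH_SPIN_LOCK_LOCKED__);	// spin while held
 *	} while (!SCOND(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 */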

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}
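
/*
 * Note that unlike the !CONFIG_ARC_HAS_LLSC variant further below, the
 * unlock above is a plain store: the smp_mb() before and after it provide
 * the RELEASE ordering (see that variant for why it uses EX instead).
 */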
80ae7eae9eSVineet Gupta 
8169cbe630SVineet Gupta /*
8269cbe630SVineet Gupta  * Read-write spinlocks, allowing multiple readers but only one writer.
8369cbe630SVineet Gupta  * Unfair locking as Writers could be starved indefinitely by Reader(s)
8469cbe630SVineet Gupta  */
8569cbe630SVineet Gupta 
8669cbe630SVineet Gupta static inline void arch_read_lock(arch_rwlock_t *rw)
8769cbe630SVineet Gupta {
8869cbe630SVineet Gupta 	unsigned int val;
8969cbe630SVineet Gupta 
9069cbe630SVineet Gupta 	smp_mb();
9169cbe630SVineet Gupta 
9269cbe630SVineet Gupta 	/*
9369cbe630SVineet Gupta 	 * zero means writer holds the lock exclusively, deny Reader.
9469cbe630SVineet Gupta 	 * Otherwise grant lock to first/subseq reader
9569cbe630SVineet Gupta 	 *
9669cbe630SVineet Gupta 	 * 	if (rw->counter > 0) {
9769cbe630SVineet Gupta 	 *		rw->counter--;
9869cbe630SVineet Gupta 	 *		ret = 1;
9969cbe630SVineet Gupta 	 *	}
10069cbe630SVineet Gupta 	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

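	/*
	 * Same grant policy as arch_read_lock() above, except bail out
	 * instead of spinning when a writer holds the lock:
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		got_it = 1;
	 *	}
	 */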
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

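	/*
	 * Same grant policy as arch_write_lock() above, except bail out
	 * instead of spinning when the lock is not free:
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		got_it = 1;
	 *	}
	 */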
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so keep this pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leaking in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed.
	 */
	smp_mb();
}
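
/*
 * The EX-based acquire above is essentially an atomic-exchange loop
 * (illustrative pseudo-C, with xchg() standing in for the ARC EX
 * instruction):
 *
 *	while (xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__) ==
 *	       __ARCH_SPIN_LOCK_LOCKED__)
 *		;	// the previous owner still held it, swap again
 */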

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option.
	 */
	smp_mb();

	/*
	 * EX is not really required here; a simple STore of 0 would suffice.
	 * However that causes tasklist livelocks in SystemC based SMP virtual
	 * platforms, where the SystemC core scheduler uses EX as a cue for
	 * moving to the next core. Do a git log of this file for details.
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A count of zero means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
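
/*
 * Note: both checks above are unsynchronized snapshots of @counter and can
 * change the moment they are evaluated; they are hints only, not guarantees
 * that a subsequent lock attempt will succeed.
 */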
41469cbe630SVineet Gupta 
4156e35fa2dSVineet Gupta #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
4166e35fa2dSVineet Gupta #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
4176e35fa2dSVineet Gupta 
4186e35fa2dSVineet Gupta #define arch_spin_relax(lock)	cpu_relax()
4196e35fa2dSVineet Gupta #define arch_read_relax(lock)	cpu_relax()
4206e35fa2dSVineet Gupta #define arch_write_relax(lock)	cpu_relax()
4216e35fa2dSVineet Gupta 
4226e35fa2dSVineet Gupta #endif /* __ASM_SPINLOCK_H */