/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
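
/*
 * A rough pseudo-C sketch of the llock/scond acquire loop above, with
 * "scond_ok" standing in for the hardware store-conditional (not a real
 * helper):
 *
 *	do {
 *		val = lock->slock;
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__ ||
 *		 !scond_ok(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 */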

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

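/*
 * Informal sketch of the counter encoding used by the LLSC rwlock ops
 * below (inferred from the code, not a formal spec):
 *
 *	rw->counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < rw->counter < __ARCH_RW_LOCK_UNLOCKED__	held by reader(s)
 *	rw->counter == 0				held by a writer
 *
 * Readers decrement the counter on lock and increment it on unlock;
 * a writer takes it from __ARCH_RW_LOCK_UNLOCKED__ down to 0 and back.
 */
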
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
	 * for ACQ and REL semantics respectively. However EX based spinlocks
	 * need the extra smp_mb to work around a hardware quirk.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	smp_mb();
}
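
/*
 * Roughly, the EX loop above keeps atomically swapping LOCKED into
 * lock->slock until the value read back is no longer LOCKED. As a
 * pseudo-C sketch ("atomic_swap" is just shorthand for the EX insn):
 *
 *	do {
 *		val = atomic_swap(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */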

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
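
/*
 * In pseudo-C terms (again treating EX as an atomic swap), the trylock
 * above amounts to roughly:
 *
 *	old = atomic_swap(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	return old == __ARCH_SPIN_LOCK_UNLOCKED__;
 *
 * If the lock was already held, LOCKED is simply written back, which is
 * harmless.
 */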

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * see pairing version/comment in arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */