xref: /openbmc/linux/arch/arc/include/asm/spinlock.h (revision d2912cb1)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

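	/*
	 * What the LLOCK/SCOND retry loop below implements, expressed as
	 * plain pseudo-code (the store only succeeds if nobody else wrote
	 * the lock word since the load, hence the retry):
	 *
	 *	while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
	 *		;
	 *	lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
	 */
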
	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * so the full all-all barrier is needed here
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

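/*
 * Counter encoding used below (a sketch derived from the asm, not a
 * definitive spec): the lock starts at __ARCH_RW_LOCK_UNLOCKED__, every
 * reader atomically decrements it, and a writer atomically swings it from
 * __ARCH_RW_LOCK_UNLOCKED__ to 0.  So:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		: free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		: held by reader(s)
 *	counter == 0					: held by a writer
 */
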
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subsequent reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * Per LKMM, smp_mb() is only required after _lock (and before _unlock)
	 * for ACQ and REL semantics respectively. However EX based spinlocks
	 * need the extra smp_mb to work around a hardware quirk.
	 */
	smp_mb();

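	/*
	 * What the EX (atomic exchange) loop below implements, as
	 * pseudo-code ("exchange" is just shorthand for the EX instruction,
	 * which atomically swaps val with the lock word, not a real helper):
	 *
	 *	do {
	 *		val = exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
	 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
	 */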
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the SystemC core scheduler uses EX as a cue for
	 * moving to the next core. See the git log of this file for details.
	 */
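	/*
	 * In effect this is nothing more than the sketch below; the EX is
	 * only there for the SystemC caveat above:
	 *
	 *	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
	 */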
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * see the paired smp_mb() / comment in arch_spin_lock() above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The rwlock state itself is held in @counter and access to it is
 * serialized with @lock_mutex.
 */

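/*
 * For reference, the layout this code assumes (a sketch of what
 * asm/spinlock_types.h provides when LLSC is not available; see that
 * header for the authoritative definition):
 *
 *	typedef struct {
 *		volatile unsigned int	counter;
 *		arch_spinlock_t		lock_mutex;
 *	} arch_rwlock_t;
 */
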
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subsequent reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */