/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

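/*
 * Lightweight sanity check of a lock word.  "andcm,=" computes
 * lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL and nullifies the next
 * instruction when the result is zero, i.e. when the word looks like
 * a valid locked (0) or unlocked value.  Any stray bits fall through
 * to the embedded SPINLOCK_BREAK_INSN, which traps so the corruption
 * is reported instead of silently misbehaving.
 */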
static inline void arch_spin_val_check(int lock_val)
{
	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
		asm volatile(	"andcm,= %0,%1,%%r0\n"
				".word %2\n"
		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
			"i" (SPINLOCK_BREAK_INSN));
}

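/*
 * The lock word lives inside arch_spinlock_t at a 16-byte aligned
 * address, as traditionally required by the ldcw instruction;
 * __ldcw_align() returns that word.  By convention the word is zero
 * while the lock is held and nonzero while it is free.
 */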
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = READ_ONCE(*a);
	arch_spin_val_check(lock_val);
	return (lock_val == 0);
}

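/*
 * Acquire: ldcw atomically loads the lock word and clears it.  A
 * nonzero old value means the lock was free and now belongs to us.
 * On failure, spin with plain loads (cheaper than hammering the cache
 * line with atomic operations) until the lock looks free, then retry
 * the atomic acquire.
 */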
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	do {
		int lock_val_old;

		lock_val_old = __ldcw(a);
		arch_spin_val_check(lock_val_old);
		if (lock_val_old)
			return;	/* got the lock */

		/* spin with plain loads until the lock looks free again */
		while (*a == 0)
			continue;
	} while (1);
}

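/*
 * Release: write the unlocked value back with an ordered store.  The
 * usual rationale for this encoding (an assumption worth verifying
 * against the PA-RISC manuals): "stw,ma" with a zero displacement
 * leaves the base register unchanged and assembles to the same
 * encoding as the PA 2.0 ordered store "stw,o", while still being
 * accepted by older assemblers.
 */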
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)"
		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

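/*
 * A single ldcw attempt: the old value is nonzero exactly when the
 * lock was free, in which case the clear has already taken it for us.
 */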
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = __ldcw(a);
	arch_spin_val_check(lock_val);
	return lock_val != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Locking is unfair: writers can be starved indefinitely by readers.
 *
 * The lock state lives in @counter and access to it is serialized
 * with @lock_mutex.
 */

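/*
 * Resulting lock-word states:
 *
 *   counter == __ARCH_RW_LOCK_UNLOCKED__      unlocked
 *   0 < counter < __ARCH_RW_LOCK_UNLOCKED__   held by readers
 *   counter == 0                              held by one writer
 */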
/* Returns 1 if the lock was taken successfully. */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A zero counter means a writer holds the lock exclusively;
	 * deny the reader.  Otherwise grant the lock to the first or
	 * a subsequent reader by taking one counter slot.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* Returns 1 if the lock was taken successfully. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If any reader holds the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; grant the lock only when it is completely free.
	 * Hence the claim that these rwlocks are unfair to writers: a writer
	 * can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

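/*
 * The blocking variants simply spin on the corresponding trylock,
 * relaxing the CPU between attempts.
 */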
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

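/* A reader unlocks by returning its slot: re-increment the counter. */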
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

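/* The writer restores the counter to its unlocked value. */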
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */