xref: /openbmc/linux/arch/parisc/include/asm/spinlock.h (revision 3d05b8ae)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2deae26bfSKyle McMartin #ifndef __ASM_SPINLOCK_H
3deae26bfSKyle McMartin #define __ASM_SPINLOCK_H
4deae26bfSKyle McMartin 
51cab4201SRolf Eike Beer #include <asm/barrier.h>
61cab4201SRolf Eike Beer #include <asm/ldcw.h>
7deae26bfSKyle McMartin #include <asm/processor.h>
8deae26bfSKyle McMartin #include <asm/spinlock_types.h>
9deae26bfSKyle McMartin 
100199c4e6SThomas Gleixner static inline int arch_spin_is_locked(arch_spinlock_t *x)
11deae26bfSKyle McMartin {
12deae26bfSKyle McMartin 	volatile unsigned int *a = __ldcw_align(x);
13deae26bfSKyle McMartin 	return *a == 0;
14deae26bfSKyle McMartin }
15deae26bfSKyle McMartin 
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

/*
 * Acquire the spinlock using ldcw (load-and-clear-word), PA-RISC's
 * atomic primitive.  @flags is the caller's saved PSW: if PSW_SM_I is
 * set, interrupts were enabled before the caller disabled them, so we
 * briefly re-enable them while busy-waiting to keep irq latency down.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* ldcw atomically fetches the word and clears it; a non-zero
	 * return means the lock was free and is now ours. */
	while (__ldcw(a) == 0)
		/* Lock was held: spin on plain reads (cheaper than
		 * repeated atomic ldcw) until it looks free again. */
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags
34deae26bfSKyle McMartin 
/*
 * Release the spinlock by storing 1 (unlocked) to the aligned lock
 * word, after making sure all prior accesses are ordered before the
 * release store.
 */
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
#ifdef CONFIG_SMP
	/* ldcw doubles as a memory barrier here; the word is already 0
	 * while the lock is held, so clearing it again is harmless. */
	(void) __ldcw(a);
#else
	mb();
#endif
	/* 1 == unlocked; this store publishes the release. */
	*a = 1;
}
47deae26bfSKyle McMartin 
480199c4e6SThomas Gleixner static inline int arch_spin_trylock(arch_spinlock_t *x)
49deae26bfSKyle McMartin {
50deae26bfSKyle McMartin 	volatile unsigned int *a;
51deae26bfSKyle McMartin 	int ret;
52deae26bfSKyle McMartin 
53deae26bfSKyle McMartin 	a = __ldcw_align(x);
54deae26bfSKyle McMartin         ret = __ldcw(a) != 0;
55deae26bfSKyle McMartin 
56deae26bfSKyle McMartin 	return ret;
57deae26bfSKyle McMartin }
58deae26bfSKyle McMartin 
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 *
 * The lock state itself is contained in @counter; access to it is
 * serialized with @lock_mutex.
 */
66deae26bfSKyle McMartin 
67fbdc8f0fSHelge Deller /* 1 - lock taken successfully */
68fbdc8f0fSHelge Deller static inline int arch_read_trylock(arch_rwlock_t *rw)
69deae26bfSKyle McMartin {
70fbdc8f0fSHelge Deller 	int ret = 0;
71deae26bfSKyle McMartin 	unsigned long flags;
72deae26bfSKyle McMartin 
73deae26bfSKyle McMartin 	local_irq_save(flags);
74fbdc8f0fSHelge Deller 	arch_spin_lock(&(rw->lock_mutex));
75fbdc8f0fSHelge Deller 
76fbdc8f0fSHelge Deller 	/*
77fbdc8f0fSHelge Deller 	 * zero means writer holds the lock exclusively, deny Reader.
78fbdc8f0fSHelge Deller 	 * Otherwise grant lock to first/subseq reader
79fbdc8f0fSHelge Deller 	 */
80fbdc8f0fSHelge Deller 	if (rw->counter > 0) {
81deae26bfSKyle McMartin 		rw->counter--;
82fbdc8f0fSHelge Deller 		ret = 1;
83deae26bfSKyle McMartin 	}
84deae26bfSKyle McMartin 
85fbdc8f0fSHelge Deller 	arch_spin_unlock(&(rw->lock_mutex));
86fbdc8f0fSHelge Deller 	local_irq_restore(flags);
87fbdc8f0fSHelge Deller 
88fbdc8f0fSHelge Deller 	return ret;
89fbdc8f0fSHelge Deller }
90fbdc8f0fSHelge Deller 
91fbdc8f0fSHelge Deller /* 1 - lock taken successfully */
92fbdc8f0fSHelge Deller static inline int arch_write_trylock(arch_rwlock_t *rw)
93deae26bfSKyle McMartin {
94fbdc8f0fSHelge Deller 	int ret = 0;
95deae26bfSKyle McMartin 	unsigned long flags;
96fbdc8f0fSHelge Deller 
97deae26bfSKyle McMartin 	local_irq_save(flags);
98fbdc8f0fSHelge Deller 	arch_spin_lock(&(rw->lock_mutex));
99deae26bfSKyle McMartin 
100fbdc8f0fSHelge Deller 	/*
101fbdc8f0fSHelge Deller 	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
102fbdc8f0fSHelge Deller 	 * deny writer. Otherwise if unlocked grant to writer
103fbdc8f0fSHelge Deller 	 * Hence the claim that Linux rwlocks are unfair to writers.
104fbdc8f0fSHelge Deller 	 * (can be starved for an indefinite time by readers).
105fbdc8f0fSHelge Deller 	 */
106fbdc8f0fSHelge Deller 	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
107deae26bfSKyle McMartin 		rw->counter = 0;
108fbdc8f0fSHelge Deller 		ret = 1;
109deae26bfSKyle McMartin 	}
110fbdc8f0fSHelge Deller 	arch_spin_unlock(&(rw->lock_mutex));
111deae26bfSKyle McMartin 	local_irq_restore(flags);
112deae26bfSKyle McMartin 
113fbdc8f0fSHelge Deller 	return ret;
114fbdc8f0fSHelge Deller }
115fbdc8f0fSHelge Deller 
116fbdc8f0fSHelge Deller static inline void arch_read_lock(arch_rwlock_t *rw)
117fbdc8f0fSHelge Deller {
118fbdc8f0fSHelge Deller 	while (!arch_read_trylock(rw))
119fbdc8f0fSHelge Deller 		cpu_relax();
120fbdc8f0fSHelge Deller }
121fbdc8f0fSHelge Deller 
122fbdc8f0fSHelge Deller static inline void arch_write_lock(arch_rwlock_t *rw)
123fbdc8f0fSHelge Deller {
124fbdc8f0fSHelge Deller 	while (!arch_write_trylock(rw))
125fbdc8f0fSHelge Deller 		cpu_relax();
126fbdc8f0fSHelge Deller }
127fbdc8f0fSHelge Deller 
/*
 * Drop a read lock: return one reader slot to @counter.  The update is
 * done under @lock_mutex with local interrupts disabled so it cannot
 * race with other lock/unlock paths on this CPU.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}
138fbdc8f0fSHelge Deller 
/*
 * Drop the write lock: restore @counter to its fully-unlocked value,
 * under @lock_mutex with local interrupts disabled, matching the
 * trylock/unlock paths above.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}
149deae26bfSKyle McMartin 
150deae26bfSKyle McMartin #endif /* __ASM_SPINLOCK_H */
151