/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

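/*
 * The lock word is accessed through __ldcw_align() because the PA-RISC
 * ldcw (load-and-clear-word) instruction requires a 16-byte aligned
 * operand.  ldcw atomically reads the word and writes zero to it, so by
 * convention the word holds 1 when the lock is free and 0 when it is
 * taken.
 */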
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

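/*
 * Spin until ldcw acquires the lock.  The inner loop polls with a plain
 * load so the cache line is not hammered with ldcw stores while the lock
 * is held, and, if the caller had interrupts enabled (PSW_SM_I set in
 * @flags), interrupts are re-enabled around each poll to keep interrupt
 * latency bounded while spinning.
 */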
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}
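/* Advertise the implementation so generic code does not substitute its
 * fallback for arch_spin_lock_flags(). */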
#define arch_spin_lock_flags arch_spin_lock_flags

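/*
 * Release the lock by storing 1 back into the lock word.  On PA-RISC 2.0
 * "stw,ma" with a zero offset encodes an ordered store, which ensures all
 * prior accesses are performed before the lock is seen as free.
 */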
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

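/*
 * __ldcw() returns the previous value of the lock word: non-zero means
 * the lock was free and is now ours, zero means it was already held.
 */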
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The lock state itself is held in @counter, and access to it is
 * serialized with @lock_mutex.
 */

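/*
 * @counter is initialized to __ARCH_RW_LOCK_UNLOCKED__ (a positive
 * value).  Each reader decrements it by one and a writer sets it to
 * zero, so:
 *
 *   counter == __ARCH_RW_LOCK_UNLOCKED__     lock is free
 *   0 < counter < __ARCH_RW_LOCK_UNLOCKED__  held by reader(s)
 *   counter == 0                             held by a writer
 */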
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively; deny the reader.
	 * Otherwise grant the lock to the first or subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If any readers hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that these rwlocks are unfair to writers: a writer
	 * can be starved indefinitely by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

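/*
 * The contended paths simply retry the trylock.  Interrupts are disabled
 * only inside each attempt, so they can be serviced between attempts.
 */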
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

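/*
 * The unlock paths also take @lock_mutex, since @counter is not updated
 * with an atomic read-modify-write operation.
 */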
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */