xref: /openbmc/linux/arch/parisc/include/asm/spinlock.h (revision d0676871)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

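/*
 * PA-RISC's only atomic read-modify-write primitive is LDCW (load and
 * clear word): it returns the old value of a word and atomically writes
 * zero to it.  The lock word convention here is therefore inverted
 * compared with most architectures: non-zero (1) means the lock is free,
 * zero means it is held.  __ldcw_align() picks the properly aligned word
 * inside the lock structure, since LDCW traditionally requires a
 * 16-byte-aligned operand.
 */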
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	smp_mb();
	return *a == 0;
}

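/*
 * Acquire the lock with LDCW.  When the attempt fails, spin on a plain
 * read of the lock word until it becomes non-zero before retrying, so
 * the cache line is not bounced by back-to-back atomic accesses
 * (the classic test-and-test-and-set pattern).
 */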
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
}

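/*
 * As arch_spin_lock(), but while busy-waiting the caller's saved
 * interrupt state (@flags) is restored so pending interrupts can be
 * serviced during contention; interrupts are disabled again before the
 * LDCW is retried.  The #define below advertises this arch-specific
 * variant to the generic locking code.
 */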
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;
	unsigned long flags_dis;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0) {
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		while (*a == 0)
			cpu_relax();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

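/*
 * Release the lock by storing 1 back into the lock word.  On SMP a
 * dummy LDCW is issued first and acts as a memory barrier, ordering the
 * critical section's accesses before the releasing store; on UP a plain
 * mb() is sufficient.
 */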
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
#ifdef CONFIG_SMP
	(void) __ldcw(a);
#else
	mb();
#endif
	*a = 1;
}

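/*
 * Trylock is a single LDCW attempt: a non-zero old value means the lock
 * was free and clearing the word has just taken it.
 */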
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Locking is unfair: writers can be starved indefinitely by readers.
 *
 * The lock state itself is kept in @counter, and every access to it is
 * serialized with the inner spinlock @lock_mutex.
 */
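/*
 * @counter starts at __ARCH_RW_LOCK_UNLOCKED__, is decremented once for
 * each active reader and is forced to 0 while a writer holds the lock.
 * Interrupts are disabled while @lock_mutex is held so that a read or
 * write attempt from interrupt context on the same CPU cannot deadlock
 * against it.
 */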

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively; deny the reader.
	 * Otherwise grant the lock to the first or any subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If any reader holds the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; the lock is granted to the writer only when it is
	 * completely unlocked.  Hence the claim that Linux rwlocks are unfair
	 * to writers: they can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

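/* Take a read lock, spinning until a reader slot is granted. */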
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

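/* Take the write lock, spinning until neither readers nor a writer hold it. */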
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

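/* Drop a read lock: give the reader slot back under @lock_mutex. */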
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

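/* Drop the write lock: mark the lock fully unlocked under @lock_mutex. */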
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */