/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

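/*
 * PA-RISC has a single atomic read-modify-write primitive, LDCW (load and
 * clear word): it returns the old value of the lock word and atomically
 * writes zero to it.  A lock word of 1 therefore means "free" and 0 means
 * "held".  __ldcw_align() picks out the lock word that LDCW may operate on
 * (LDCW traditionally requires 16-byte alignment).
 */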
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

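/*
 * Acquire: try to grab the lock with LDCW; while that fails, spin on a
 * plain load of the lock word (avoiding further atomic traffic) until it
 * reads non-zero, then try LDCW again.  If the caller's saved PSW had the
 * interrupt bit (PSW_SM_I) set, interrupts are briefly re-enabled while
 * spinning so pending interrupts are not held off indefinitely.
 */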
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags

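/*
 * Release: order all accesses made inside the critical section before the
 * store that marks the lock word free again (1).
 */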
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	mb();
	*a = 1;
}

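/*
 * Trylock: a single LDCW attempt.  A non-zero old value means the lock was
 * free and is now ours; zero means somebody else holds it.
 */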
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */

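/*
 * Counter convention: rw->counter is 0 when the rwlock is unlocked, positive
 * when held by that many readers, and -1 when write-locked.
 */
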
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
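/*
 * Only fails when the rwlock is already write-locked (counter < 0).  If we
 * merely lose the race for the inner spinlock to other readers, we wait for
 * a realistic chance at it and retry rather than reporting failure.
 */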
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
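/*
 * The writer keeps rw->lock held until arch_write_unlock(), which is what
 * blocks new readers.  If readers are still present, back off completely
 * (dropping the spinlock and restoring interrupts), spin until the counter
 * drains, then retry.
 */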
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

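/*
 * Clear the write-locked marker and release the spinlock that
 * arch_write_lock() left held.
 */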
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

#endif /* __ASM_SPINLOCK_H */