#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

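/*
 * Worked example (illustrative values, not from a real trace): with
 * now_serving = 2 and next_ticket = 3 the lock word is (2 << 17) | 3 ==
 * 0x00040003, and ((0x00040003 >> 17) ^ 0x00040003) & TICKET_MASK ==
 * (2 ^ 3) == 1, so the lock is held.  Once now_serving catches up
 * (word 0x00060003), the XOR of the two fields is zero and the lock
 * is free.
 */
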
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	/* Grab the next ticket; the acquire form orders later accesses. */
	ticket = ia64_fetchadd(1, p, acq);

	/* Uncontended fast path: our ticket is already being served. */
	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	/* Flush the ALAT so the checked loads below really hit memory. */
	ia64_invala();

	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		/* Spin until now_serving reaches the ticket we drew. */
		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	/* Only attempt the cmpxchg when the lock looks free right now. */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	/* now_serving sits at bit 1 of the upper halfword (little-endian). */
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

	/* ld2.bias hints that we want exclusive ownership of the line. */
	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	/* +2 advances now_serving by one; ~1 keeps the low pad bit clear. */
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
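
/*
 * Illustrative unlock trace (made-up value): releasing the lock word
 * 0x00040003 loads the upper halfword 0x0004, stores (0x0004 + 2) & ~1 ==
 * 0x0006, and leaves 0x00060003 behind, where now_serving == next_ticket
 * == 3, i.e. the lock is free for the next ticket holder.
 */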

static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket;

	ia64_invala();

	/* Spin, without queueing, until whoever holds the lock drops it. */
	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* Locked whenever now_serving and next_ticket disagree. */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* next_ticket - now_serving > 1 means someone queues behind the owner. */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
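
/*
 * Example (illustrative value): for lock word 0x00040005, next_ticket == 5
 * and now_serving == 2, so (0x00040005 - 2) & TICKET_MASK == 3: the owner
 * plus two waiters, hence contended.
 */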

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	/* flags are ignored: the ticket lock never re-enables irqs to spin. */
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_spin_unlock_wait(lock);
}
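
/*
 * Minimal usage sketch (hypothetical caller, not part of this header);
 * generic code normally reaches these hooks via spin_lock()/spin_unlock():
 *
 *	arch_spinlock_t s = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&s);
 *	... critical section ...
 *	arch_spin_unlock(&s);
 */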

/* rwlocks share one word: bit 31 is the write bit, bits 0..30 count readers. */
#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		/* p6 <- "interrupts were enabled in flags" */
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"			/* first try the fast path */
		"1:\n"				/* slow path: undo, then wait */
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"		/* re-enable irqs while spinning */
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"	/* still write-locked? */
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"		/* mask irqs again before retrying */
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"	/* take a read reference */
		"cmp4.lt p7,p0 = r2, r0\n"	/* raced with a writer? */
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}
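
/*
 * Roughly what the asm above does, as illustrative C (it mirrors the
 * !ASM_SUPPORTED fallback below, plus the irq dance keyed off `flags`):
 *
 *	while (ia64_fetchadd(1, (int *)lock, acq) < 0) {
 *		ia64_fetchadd(-1, (int *)lock, rel);
 *		if (flags & (1UL << IA64_PSR_I_BIT))
 *			local_irq_enable();
 *		while (*(volatile int *)lock < 0)
 *			cpu_relax();
 *		if (flags & (1UL << IA64_PSR_I_BIT))
 *			local_irq_disable();
 *	}
 */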

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)								\
do {											\
	arch_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		/* A writer beat us: drop our reference and wait it out. */		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		/* p6 <- "interrupts were enabled in flags" */
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"		/* expect an all-clear word */
		"dep r29 = -1, r0, 31, 1\n"	/* r29 = 0x80000000 (write bit) */
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"		/* re-enable irqs while spinning */
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"	/* wait for the word to reach 0 */
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"		/* mask irqs again before retrying */
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"	/* raced: someone got in first */
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
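
/*
 * Stripped of the irq handling, the acquisition above boils down to this
 * illustrative C (compare the !ASM_SUPPORTED fallback further down):
 *
 *	while (ia64_cmpxchg4_acq((__u32 *)lock, 0x80000000, 0) != 0)
 *		cpu_relax();
 *
 * The cmpxchg succeeds only when the word is 0, i.e. no readers and no
 * other writer are present.
 */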

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	/* old value 0 means we installed the write bit */			\
	(result == 0);								\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* Byte 3 holds bit 31, the write bit; st1.rel clears just that byte. */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	/* Force the write bit clear so the cmpxchg fails if a writer holds it. */
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
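
/*
 * Illustrative walk-through (made-up snapshot): if *x reads back as 2
 * readers and no writer, old.word == 2 and new.word == 3; the cmpxchg
 * succeeds only if the word is still exactly 2, i.e. no writer arrived
 * and no other reader raced with us.  A concurrent writer always makes
 * the comparison fail, because the snapshot had write_lock forced to 0.
 */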

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */