#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

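/*
 * Worked example (illustrative, not from the original source): with
 * TICKET_SHIFT = 17 and TICKET_MASK = 0x7fff, a lock word of 0x00020001
 * decodes to now_serving = (0x00020001 >> 17) & 0x7fff = 1 and
 * next_ticket = 0x00020001 & 0x7fff = 1.  Head equals tail, so the lock
 * is free; the next CPU to fetchadd the word takes ticket 1 (the old
 * next_ticket), sees it match now_serving, and acquires without spinning.
 */
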
#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

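/*
 * Acquire: one atomic fetchadd takes a ticket; if the lock was not free,
 * spin with ld4.c.nc check loads.  Roughly: ia64_invala() flushes the
 * ALAT so the first check load really reads memory, and thereafter the
 * load is re-issued only after another CPU's store has invalidated the
 * ALAT entry, which keeps the wait loop quiet on the bus.
 */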
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	ticket = ia64_fetchadd(1, p, acq);

	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

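/*
 * Trylock: succeed only if the lock looks free (now_serving ==
 * next_ticket) and the cmpxchg that claims the next ticket wins the
 * race; any concurrent change makes the cmpxchg fail and we return 0
 * instead of spinning.
 */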
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

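/*
 * Release: bump now_serving.  On this little-endian layout the upper
 * halfword of the lock word holds one pad bit (bit 0 here, bit 16 of
 * the word) plus now_serving, so adding 2 advances now_serving by one
 * and the "& ~1" keeps that pad bit clear.  The ld2.bias load hints
 * that we want the cache line exclusively, since a store always follows.
 */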
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

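/*
 * Spin until the lock is completely free, i.e. now_serving has caught
 * up with next_ticket and no CPU holds or is queued for the lock.
 */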
static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket;

	ia64_invala();

	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

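/* The lock is held whenever now_serving and next_ticket differ. */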
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

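/*
 * next_ticket - now_serving counts the holder plus any waiters; a
 * difference greater than one means at least one CPU is queued behind
 * the current owner.
 */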
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_spin_unlock_wait(lock);
}

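/*
 * Read-write locks use one word: bit 31 is the write lock and bits
 * 0..30 count the readers, so the word is negative while a writer
 * holds the lock and zero only when it is completely free.
 */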
#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

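/*
 * If the caller's saved flags had interrupts enabled (psr.i set), the
 * contended path re-enables interrupts with ssm psr.i while spinning
 * and masks them again with rsm psr.i before retrying the fetchadd,
 * so we never busy-wait with interrupts needlessly disabled.
 */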
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

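/*
 * C fallback: optimistically bump the reader count; if the old value
 * was negative a writer held the lock, so undo the increment and spin
 * until the word turns non-negative before trying again.
 */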
#define arch_read_lock(rw)								\
do {											\
	arch_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

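/*
 * Writer fast path: a single cmpxchg4.acq that swings the word from 0
 * to the write bit (r29 = 1 << 31).  On failure it uses the same
 * psr.i dance as the reader path, spinning with a plain load until
 * the word reads zero before retrying the cmpxchg.
 */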
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

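/*
 * One-shot version of the writer fast path: a single attempt to move
 * the word from 0 to the write bit, with no spinning on failure.
 */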
#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

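/*
 * Releasing the write lock only needs bit 31 cleared.  On this
 * little-endian layout that bit lives in the topmost byte, so a
 * one-byte st1.rel store of zero is enough (the reader count never
 * plausibly reaches the top byte) and avoids an atomic
 * read-modify-write of the whole word.
 */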
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

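/*
 * C fallback for the writer: spin with plain loads until the word
 * reads zero, then try to install the write bit with cmpxchg4.acq,
 * looping if another CPU got there first.
 */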
#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

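/*
 * Reader trylock: snapshot the lock, build the expected old value with
 * write_lock forced clear and the desired new value with one more
 * reader, then attempt the transition with a single cmpxchg; it fails
 * if a writer held the lock or anything changed in between.
 */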
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */