#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

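/* A spinlock is free when its lock word is zero. */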
#define __raw_spin_lock_init(x)			((x)->lock = 0)

#ifdef ASM_SUPPORTED
/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

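/*
 * Fast path: a single cmpxchg4.acq of the lock word against 0.  On contention
 * we branch to the contention routine with the lock address in r31, the
 * caller's saved interrupt flags in r27 and the return address in r28; the
 * registers that routine may touch are the ones listed in
 * IA64_SPINLOCK_CLOBBERS above.
 */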
static inline void
__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "mov r27=%2\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov r27=%2\n\t"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#endif
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)

/* Unlock by doing an ordered store and releasing the cacheline with nta */
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	barrier();
	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
}

#else /* !ASM_SUPPORTED */
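/*
 * Compiler-only fallback: spin in C.  Try cmpxchg4.acq once; if the lock was
 * held, wait for the lock word to read zero before retrying the exchange so
 * the cacheline is not hammered with atomic operations.  Note that the
 * _flags variant simply discards @flags here, so interrupts stay disabled
 * while spinning.
 */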
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
# define __raw_spin_lock(x)								\
do {											\
	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
	__u64 ia64_spinlock_val;							\
	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
	if (unlikely(ia64_spinlock_val)) {						\
		do {									\
			while (*ia64_spinlock_ptr)					\
				ia64_barrier();						\
			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
		} while (ia64_spinlock_val);						\
	}										\
} while (0)
#define __raw_spin_unlock(x)	do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
#endif /* !ASM_SUPPORTED */

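/*
 * trylock takes a free lock with a single cmpxchg_acq; unlock_wait just spins
 * until the current holder releases the lock.
 */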
#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

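/*
 * rwlock layout: bits 0-30 hold the read counter, bit 31 is the write lock.
 * Readers may take the lock while the word is non-negative (no writer);
 * writers only while it is zero (no writer and no readers).
 */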
#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

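/*
 * Acquire for read: fetchadd4.acq the lock word by +1; if the word was
 * negative a writer holds the lock, so undo the increment with fetchadd4.rel,
 * spin on plain loads until the word turns non-negative (re-enabling
 * interrupts while waiting if the caller's saved psr.i bit in @flags says
 * they were on), then retry.  Roughly, as a sketch with a hypothetical
 * fetchadd() helper and the interrupt handling left out:
 *
 *	while (fetchadd((int *) lock, 1, acq) < 0) {
 *		fetchadd((int *) lock, -1, rel);
 *		while (*(volatile int *) lock < 0)
 *			cpu_relax();
 *	}
 */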
static __always_inline void
__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)

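/*
 * C fallback for the read path: same algorithm as the assembly version above,
 * except that @flags is discarded, so interrupts are not re-enabled while
 * waiting.
 */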
#define __raw_read_lock(rw)								\
do {											\
	raw_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

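/* Drop a reader: decrement the read counter with release semantics. */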
#define __raw_read_unlock(rw)					\
do {								\
	raw_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

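/*
 * Acquire for write: try to cmpxchg4.acq the lock word from 0 to the
 * write_lock bit (0x80000000, built into r29 with dep).  On failure, spin on
 * plain loads until the word reads zero, re-enabling interrupts while waiting
 * if @flags says they were on, then retry the exchange.
 */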
static __always_inline void
__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

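/* One-shot write acquire: succeeds only if the lock word was zero. */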
#define __raw_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

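/*
 * Release the write lock: the write_lock bit is bit 31, the top bit of byte 3
 * of the little-endian lock word, so an ordered store of zero to that byte
 * drops the writer bit while leaving the low bytes of the read counter alone.
 */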
static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)

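/*
 * C fallback for the write path: wait until the lock word reads zero, then
 * try to cmpxchg in the write_lock bit (ia64_dep_mi(-1, 0, 31, 1) builds the
 * 0x80000000 value); repeat until the exchange succeeds.
 */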
#define __raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define __raw_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

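/*
 * Read trylock, shared by both variants: snapshot the lock, require the
 * writer bit to be clear, bump the read counter in the new value and cmpxchg
 * the whole word.  Fails if a writer holds the lock or the word changed
 * underneath us.
 */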
static inline int __raw_read_trylock(raw_rwlock_t *x)
{
	union {
		raw_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /*  _ASM_IA64_SPINLOCK_H */