/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */
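
/* The spinlock itself is a single byte: zero means unlocked, any
 * non-zero value (0xff once ldstub has run) means locked.
 */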
#define arch_spin_is_locked(lp)	((lp)->lock != 0)

#define arch_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)
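
/* Acquire: ldstub atomically reads the lock byte and stores 0xff to it.
 * If the old value was non-zero the lock was held, so fall into the
 * out-of-line loop (.subsection 2) and spin with plain loads until the
 * byte reads zero, then branch back and retry the ldstub.
 */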
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
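
/* Try-acquire: a single ldstub.  Returns 1 if the old byte was zero
 * (we now own the lock), 0 if it was already held.
 */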
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
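
/* Release: store %g0 (always zero) to the lock byte.  The "memory"
 * clobber keeps the compiler from sinking protected accesses below
 * the store.
 */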
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
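
/* Like arch_spin_lock(), but while spinning we drop %pil back to the
 * caller's pre-lock interrupt level (passed in as 'flags') so that
 * interrupts can be serviced, then raise it again in the branch delay
 * slot just before retrying the ldstub.
 */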
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
/* Multi-reader locks; these are much saner than the 32-bit Sparc ones... */
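
/* rwlock word layout: bit 31 is the writer bit, the low bits count the
 * active readers.  A reader takes the lock by cas-incrementing the
 * counter; if the word is negative a writer holds it, so spin out of
 * line until it goes non-negative and then retry the cas.
 */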
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
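
/* Try-read: fail (return 0) straight away if the counter is negative,
 * i.e. a writer holds the lock; otherwise cas-increment the reader
 * count, retrying only if the cas itself raced, and return 1.
 */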
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}
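
/* Read unlock: cas-decrement the reader count, retrying on races. */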
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
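
/* Write lock: spin out of line until the whole word reads zero (no
 * readers, no writer), then try to cas in the 0x80000000 writer bit;
 * retry if the cas raced.
 */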
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
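
/* Write unlock: clear the whole word (writer bit and all) with a plain
 * store; the "memory" clobber is the compiler barrier.
 */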
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}
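
/* Try-write: succeed only if the word is zero and our cas of the
 * writer bit wins; returns 1 on success, 0 otherwise.
 */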
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
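
/* A read lock can be taken unless the writer bit is set; a write lock
 * only when the whole word is zero.
 */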
#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)	(!(rw)->lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */