/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

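/*
 * Lock with ldstub: atomically fetch the lock byte and set it to 0xff.
 * A zero result means we now own the lock; otherwise spin out of line
 * (.subsection 2) on plain loads until the byte looks clear, then retry.
 */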
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

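/*
 * One ldstub, no spinning: returns true only if the lock byte was
 * clear, i.e. we just became the owner.
 */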
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

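/*
 * Unlock by clearing the lock byte; the "memory" clobber keeps the
 * compiler from sinking critical-section accesses below the store.
 */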
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. If a writer successfully acquires the wlock
 * but the counter is non-zero, it has to release the lock and wait
 * until both the counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
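/*
 * For illustration only (nothing below depends on these expressions):
 * on this big-endian layout the two fields read back as
 *
 *	readers = rw->lock >> 8;	upper 24 bits
 *	wlock   = rw->lock & 0xff;	low byte, the one ldstub targets
 *
 * arch_read_can_lock() at the bottom of this file tests only the wlock
 * byte, while arch_write_can_lock() requires the whole word to be zero.
 */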
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

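/*
 * The real work happens out of line in ___rw_read_enter() and friends
 * (arch/sparc/lib/locks.S), which expect the lock in %g1 and scribble
 * on %g2/%g4. A reader briefly holds the wlock byte while bumping the
 * counter, so interrupts are disabled around the update: an interrupt
 * handler taking the same read lock on this CPU would otherwise spin
 * on a byte we ourselves hold.
 */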
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

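/*
 * ___rw_write_enter() grabs the wlock byte and, per the scheme above,
 * backs off and retries while readers are still counted; once it
 * returns, stamp the whole word so the lock reads as held everywhere.
 */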
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

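/* Releasing a write lock clears counter and wlock in one word store. */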
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

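/*
 * Try the wlock byte with ldstub; if we got it but readers are still
 * counted, back out by clearing the byte, otherwise stamp the whole
 * word as write-locked.
 */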
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

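/*
 * Like __arch_read_lock(), but ___rw_read_try() gives up rather than
 * spinning when a writer is in, returning the result in %o0; the
 * wrapper below adds the same interrupt protection as arch_read_lock().
 */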
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

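/*
 * Nothing here can make use of the saved irq flags, so the *_flags()
 * variants simply take the lock the ordinary way.
 */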
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

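/*
 * A read lock is only excluded by the wlock byte; a write lock needs
 * the whole word (readers and wlock) to be clear.
 */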
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */