/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>

#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

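/* Acquire with ldstub: atomically load the lock byte and store 0xff
 * back.  A loaded value of zero means we now own the lock.  On failure
 * we branch to an out-of-line loop (.subsection 2) that spins on a
 * plain ldub until the byte reads zero, then retries the ldstub; this
 * keeps the uncontended fast path straight-line and avoids hammering
 * the bus with atomic operations while the lock is held.  The ",a"
 * (annul) branches execute the delay-slot ldub only when the branch
 * is taken.
 */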
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

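/* Non-blocking acquire: a single ldstub; the old value is zero iff
 * the lock was free and is now ours.
 */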
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

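/* Release: a plain zero byte store.  The "memory" clobber acts as a
 * compiler barrier, keeping critical-section accesses from being
 * reordered past the store.
 */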
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  raw_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that a writer holds the lock or that somebody is
 * updating the counter. If a writer successfully acquires wlock but
 * the counter is non-zero, it must release the lock and wait until
 * both the counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
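/* Concrete word values, following the layout above: unlocked is
 * 0x00000000, three readers is 0x00000300 (counter = 3, wlock = 0),
 * and write-held is 0xffffffff (__raw_write_lock below stores ~0U once
 * it owns the lock).
 *
 * The lock and unlock paths call out-of-line assembler helpers
 * (___rw_read_enter and friends, implemented in arch/sparc/lib/locks.S)
 * with a private calling convention visible in the clobber lists: %g1
 * holds the lock pointer, %g2 the ldstub result, and %g4 the saved
 * return address, since the call itself clobbers %o7.
 */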
static inline void arch_read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

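/* The wrappers below disable local interrupts around the helper call:
 * a reader briefly owns the wlock byte while updating the counter, so
 * an interrupt handler taking the same lock on this CPU during that
 * window would spin forever.
 */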
#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void arch_read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

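/* Write acquire: ___rw_write_enter grabs the wlock byte with ldstub;
 * per the scheme described above, if readers still hold the counter it
 * backs off and retries until both halves are zero.  On return we own
 * the lock and mark the whole word write-held.
 */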
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;	/* mark write-held */
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int val;

	/* Grab the wlock byte; a non-zero result means it was taken. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			/* Readers still active: drop the wlock byte. */
			((volatile u8*)&rw->lock)[3] = 0;
		else
			/* No readers: mark the whole word write-held. */
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

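/* ___rw_read_try uses the same helper convention and hands its result
 * back in %o0: non-zero on success, zero if a writer holds or is
 * acquiring the lock.
 */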
static inline int arch_read_trylock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define __raw_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

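/* Writer release: clearing the whole word drops the wlock byte and the
 * counter in a single store.
 */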
#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */