/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

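/*
 * The lock word holds 1 while the lock is free and 0 once a CPU has
 * taken it, so anything <= 0 counts as locked.
 */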
#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness has a cost.
 */
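/*
 * arch_spin_lock() relies on the SH-4A LL/SC pair: movli.l loads the
 * lock word and sets the reservation, movco.l stores 0 back only if
 * the reservation is still intact (T is cleared on failure, so the
 * first "bf 1b" retries).  Once the store has gone through, cmp/pl
 * tests the value we sampled: if it was not positive the lock was
 * already held and we go around again.
 */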
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

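/*
 * Unlocking is a plain store of 1 back into the lock word; the
 * "memory" clobber acts as a compiler barrier so accesses from the
 * critical section are not reordered past the release.
 */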
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

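/*
 * A single LL/SC attempt: the loop is only retaken when the
 * store-conditional itself fails, never because the lock is held.
 * The sampled lock word is returned, so non-zero means the lock was
 * free and is now ours; the trailing synco keeps later memory
 * accesses behind the acquisition.
 */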
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irq-safe
 * read-locks.
 */
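/*
 * The counter in the rwlock starts at RW_LOCK_BIAS.  Each reader
 * subtracts one and a writer subtracts the whole bias, so the counter
 * is positive only while no writer is in, and equals RW_LOCK_BIAS only
 * when nobody holds the lock at all.
 */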

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

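/*
 * A reader spins until the counter is positive (no writer present)
 * and then decrements it with the usual movli.l/movco.l retry loop.
 */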
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

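/* Dropping a read lock atomically gives the reader's count back. */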
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

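/*
 * A writer spins until the full bias is available (no readers and no
 * writer) and then claims all of it, leaving the counter at zero.
 */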
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

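/* Releasing the write lock restores the full bias with a plain store. */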
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

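/*
 * Like arch_read_lock(), but if a writer is in we drop out at label 2
 * instead of spinning and report failure from the sampled value.
 */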
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

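/*
 * One attempt at the write lock.  When the full bias is not available
 * the sub is skipped and the unchanged value is stored back, so we
 * fall through to the synco and return failure.
 */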
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_LLSC_H */