/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

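/*
 * Lock word encoding: 1 means unlocked, 0 means held.  arch_spin_lock()
 * claims the lock by atomically writing 0 with a movli.l/movco.l
 * (load-locked/store-conditional) sequence, and arch_spin_unlock()
 * releases it with a plain store of 1.
 */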
#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, VAL > 0);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
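/*
 * Acquire: movli.l loads the lock word into r0 ("z" constraint) and sets
 * the link bit; movco.l stores 0 back only if the link is still intact,
 * setting T on success.  The first "bf 1b" retries a failed
 * store-conditional, and the cmp/pl test on the old value retries until
 * the lock was actually free (> 0) when we claimed it.  Writing 0 over an
 * already-held lock is harmless since it was already 0.
 */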
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

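/*
 * Release: a plain store of 1 drops the lock; the "memory" clobber keeps
 * the compiler from moving accesses past it.
 */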
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

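/*
 * Trylock: a single LL/SC attempt to write 0.  Only a failed
 * store-conditional is retried; if the lock was already held we give up
 * and return.  synco orders the access before we return, and the old lock
 * value (nonzero only if we took a free lock) is the return value.
 */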
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
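
/*
 * Illustrative sketch only (generic rwlock API, not part of this header;
 * the lock name is a placeholder): interrupt-context readers can take
 * plain read_lock()/read_unlock(), while any writer that may race with
 * them uses the irq-safe variant, e.g.
 *
 *	DEFINE_RWLOCK(example_lock);
 *
 *	read_lock(&example_lock);			read side, e.g. in an IRQ handler
 *	read_unlock(&example_lock);
 *
 *	write_lock_irqsave(&example_lock, flags);	write side
 *	write_unlock_irqrestore(&example_lock, flags);
 */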

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

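/*
 * Reader acquire: the lock word starts at RW_LOCK_BIAS and each reader
 * atomically decrements it.  cmp/pl spins while the value is <= 0 (a
 * writer holds the lock), and a failed store-conditional restarts the
 * whole load/decrement sequence.
 */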
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

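/*
 * Reader release: atomically add 1 back to the count, retrying if the
 * store-conditional fails.
 */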
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

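/*
 * Writer acquire: spin until the count is back at RW_LOCK_BIAS (no readers
 * and no writer), then subtract the full bias, leaving 0.  cmp/hs is an
 * unsigned >= compare, and a failed store-conditional restarts the loop.
 */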
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

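/*
 * Writer release: restore the count to RW_LOCK_BIAS with a plain store.
 */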
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

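/*
 * Reader trylock: if the count is <= 0 (writer held) skip the store and
 * fail; otherwise decrement, retrying only on store-conditional failure.
 * Returns nonzero when the read lock was taken.
 */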
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

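/*
 * Writer trylock: subtract the bias only when the count equals
 * RW_LOCK_BIAS; on the failure path the unchanged value is written back,
 * which is harmless.  A failed store-conditional retries the loop, and
 * the return value is true only if the old count was the full bias.
 */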
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_LLSC_H */