/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
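/*
 * Lock word convention: an arch_spinlock_t holds 1 while the lock is free
 * and 0 once a CPU has taken it, hence "locked" is tested as <= 0 below.
 */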

#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */
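/*
 * All of the lock-word updates below use the SH-4A LL/SC pair: movli.l
 * begins an atomic sequence by loading the lock word and setting the link,
 * and the matching movco.l performs its store only if the sequence was not
 * disturbed, setting the T bit on success.  "bf 1b" therefore retries until
 * the read-modify-write has completed atomically.
 */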
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

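/*
 * Releasing the lock is a plain store of the unlocked value (1); no LL/SC
 * sequence is used here.  The "memory" clobber acts as a compiler barrier,
 * so critical-section accesses are not reordered past the release by the
 * compiler.
 */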
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

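/*
 * arch_spin_trylock() unconditionally writes 0 and hands back the previous
 * lock value, so a non-zero return means the lock was free and is now held.
 * The loop only retries when the store-conditional itself fails, not on
 * contention; the trailing synco (SH-4A synchronization instruction) orders
 * the acquisition before subsequent memory accesses.
 */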
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
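/*
 * Counter convention: the lock word starts at RW_LOCK_BIAS.  Each reader
 * subtracts 1, a writer subtracts the entire bias, so:
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	0 < lock < RW_LOCK_BIAS		held by one or more readers
 *	lock == 0			held by a writer
 *
 * Purely illustrative sketch of the "mix" described above, written against
 * the generic rwlock API that wraps these primitives ("map_lock" and the
 * calling contexts are hypothetical, not part of this file):
 *
 *	read_lock(&map_lock);			// e.g. from an IRQ handler
 *	...
 *	read_unlock(&map_lock);
 *
 *	write_lock_irqsave(&map_lock, flags);	// writer disables local IRQs
 *	...
 *	write_unlock_irqrestore(&map_lock, flags);
 */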

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

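/*
 * arch_read_trylock(): if the counter is not positive (a writer holds the
 * lock), the store-conditional is skipped via the "2:" label and the old
 * value is returned unchanged, so the result is non-zero only when a
 * reader reference was actually taken.
 */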
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

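/*
 * arch_write_trylock(): succeeds only when the full RW_LOCK_BIAS is still
 * present (no readers and no writer).  On contention the value is written
 * back unmodified by the shared movco.l, leaving the lock untouched, and
 * the comparison against RW_LOCK_BIAS - 1 makes the return value zero.
 */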
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SH_SPINLOCK_LLSC_H */