/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

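/*
 * The lock word holds 1 while the lock is free and 0 once it has been
 * taken, which is why "locked" is tested as lock <= 0 here and why
 * arch_spin_unlock() simply stores 1 back.
 */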
#define arch_spin_is_locked(x)		((x)->lock <= 0)

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  Fairness would have a cost.
 */
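/*
 * The locking loops in this file are built on the SH-4A load-linked/
 * store-conditional pair: movli.l loads the lock word and sets the
 * reservation, movco.l stores only while the reservation is still held
 * and sets the T bit on success, and "bf 1b" retries when T is clear.
 * For arch_spin_lock() the sequence behaves roughly like this sketch
 * (illustrative pseudo-C only; load_linked() and store_conditional()
 * are not real helpers):
 *
 *	do {
 *		old = load_linked(&lock->lock);			! movli.l
 *		ok  = store_conditional(&lock->lock, 0);	! movco.l
 *	} while (!ok || old <= 0);		! bf 1b; cmp/pl; bf 1b
 */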
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

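/*
 * Releasing the lock is a plain store of 1, the "free" value; waiters
 * spinning in arch_spin_lock() pick the new value up on their next
 * LL/SC pass.
 */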
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

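/*
 * A single LL/SC attempt on the lock word: the loop only repeats when the
 * store-conditional itself fails.  The previous value is returned, so the
 * caller sees non-zero when the lock was free (and is now held) and zero
 * when it was already taken.  synco is the SH-4A barrier instruction and
 * orders the acquisition against the critical section that follows.
 */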
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irq-safe
 * read-locks.
 */

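/*
 * The rwlock counter starts at RW_LOCK_BIAS.  Each reader atomically
 * decrements it by one and a writer subtracts the whole bias, so the
 * count is back at RW_LOCK_BIAS only when the lock is completely idle.
 * In rough pseudo-C (illustrative only), the LL/SC loops below implement:
 *
 *	read_lock:    wait until lock > 0,             then lock -= 1;
 *	write_lock:   wait until lock >= RW_LOCK_BIAS, then lock -= RW_LOCK_BIAS;
 *	read_unlock:  lock += 1;
 *	write_unlock: lock  = RW_LOCK_BIAS;
 *
 * cmp/pl is the signed "> 0" test and cmp/hs the unsigned ">=" comparison
 * against the bias.
 */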
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

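/*
 * Drop a reader reference by atomically incrementing the count back
 * towards RW_LOCK_BIAS.
 */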
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

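/*
 * Spin until the count is back at the full bias (no readers, no writer),
 * then subtract RW_LOCK_BIAS in one LL/SC sequence to claim exclusive
 * ownership.
 */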
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

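/*
 * Release the write lock by storing RW_LOCK_BIAS back; waiters observe
 * the restored count on their next LL/SC pass.
 */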
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

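/*
 * One attempt at taking a read lock: if the count is not positive the
 * store-conditional is skipped and 0 is returned; otherwise the count is
 * decremented and 1 is returned.  As in arch_spin_trylock(), synco acts
 * as a barrier after the attempt.
 */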
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

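/*
 * One attempt at taking the write lock.  When the count is below the bias
 * the unmodified value is written back (the store-conditional still runs
 * via label 2), and the old value decides the result: non-zero is returned
 * only when the count was the full RW_LOCK_BIAS, i.e. the lock was idle.
 */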
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */