/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

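	/*
	 * Atomically swap 0 into the lock word, then retry from the top
	 * if either the store-conditional failed or the value we
	 * displaced was not positive (i.e. the lock was already held).
	 */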
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

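	/*
	 * Release is a plain store of 1 back into the lock word; the
	 * "memory" clobber keeps the compiler from sinking critical
	 * section accesses past the store.
	 */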
	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

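	/*
	 * Single acquisition attempt: store 0 over whatever was there,
	 * retrying only if the store-conditional itself fails.  The old
	 * value is returned, so a positive result means we took a free
	 * lock; synco acts as a barrier after the attempt.
	 */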
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
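
/*
 * The rwlock word counts down from RW_LOCK_BIAS: each reader takes one
 * unit, a writer takes the whole bias.  So the value is RW_LOCK_BIAS
 * when the lock is free, somewhere in (0, RW_LOCK_BIAS) while readers
 * hold it, and 0 while a writer holds it.
 */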

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

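	/*
	 * Spin until the count is positive, then atomically take one
	 * reader unit (decrement by one).
	 */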
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

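	/* Atomically give the reader unit back (increment by one). */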
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

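	/*
	 * Spin until the count is back at the full bias (no readers and
	 * no writer), then atomically subtract RW_LOCK_BIAS to claim the
	 * lock exclusively.
	 */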
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
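	/* Drop the write lock by restoring the full RW_LOCK_BIAS value. */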
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

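	/*
	 * One shot at taking a reader unit: bail out to 2: if the count
	 * is not positive, otherwise decrement it, retrying only when
	 * the store-conditional fails.  Success is reported when the
	 * value we sampled was positive.
	 */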
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

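	/*
	 * One shot at the write lock: if the count is below the bias the
	 * subtraction is skipped and the unchanged value is written back,
	 * so only the store-conditional failure path loops.  Success is
	 * reported when the sampled value was at least the full bias.
	 */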
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */