/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On AArch64, ordinary assignment (str instruction) doesn't clear the
 * local exclusive monitor on some implementations. The reason we can use
 * it for atomic_set() is the clrex or dummy stxr done on every exception
 * return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

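/*
 * Illustrative usage sketch (hypothetical code, not part of this header):
 *
 *	static atomic_t hyp_counter = ATOMIC_INIT(0);
 *
 *	atomic_set(&hyp_counter, 42);		// plain store, safe as above
 *	int cur = atomic_read(&hyp_counter);	// volatile load
 */
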
/*
 * AArch64 UP and SMP safe atomic ops.  We use load-exclusive (ldxr) and
 * store-exclusive (stxr) to ensure that these are atomic; the
 * store-exclusive may fail, so we loop until the update succeeds.
 */
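
/*
 * Illustrative sketch of the ldxr/stxr retry pattern shared by the ops
 * below, in C-like pseudocode (load_exclusive() and store_exclusive()
 * are hypothetical helpers, not real kernel API):
 *
 *	do {
 *		old = load_exclusive(&v->counter);		// ldxr
 *		new = old + i;
 *	} while (!store_exclusive(&v->counter, new));	// stxr + cbnz
 */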
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}
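
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * atomic_add_return() returns the post-addition value and, via the
 * stlxr/smp_mb() pair, acts as a full memory barrier, so it can hand
 * out unique tokens:
 *
 *	static atomic_t hyp_next_id = ATOMIC_INIT(0);
 *
 *	int hyp_alloc_id(void)
 *	{
 *		return atomic_add_return(1, &hyp_next_id);	// 1, 2, 3, ...
 *	}
 */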

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
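
/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * canonical atomic_cmpxchg() retry loop, here a saturating increment:
 *
 *	static int hyp_inc_below(atomic_t *v, int max)
 *	{
 *		int old, c = atomic_read(v);
 *
 *		while (c < max && (old = atomic_cmpxchg(v, c, c + 1)) != c)
 *			c = old;
 *		return c;	// value seen before any successful increment
 *	}
 */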

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
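
/*
 * Illustrative sketch (hypothetical): atomic_xchg() unconditionally
 * stores the new value and returns the old one, giving a one-shot claim
 * flag:
 *
 *	if (atomic_xchg(&hyp_claimed, 1) == 0)
 *		hyp_do_first_time_setup();	// we won the race
 */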

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
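
/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * return value is the decremented counter, or a negative value if the
 * counter was already zero or negative, in which case it is left
 * unchanged:
 *
 *	if (atomic64_dec_if_positive(&hyp_slots) < 0)
 *		return -EBUSY;	// no slot left
 */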

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
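
/*
 * Illustrative sketch (hypothetical): atomic64_add_unless() returns
 * non-zero iff the addition happened, so atomic64_inc_not_zero() can
 * take a reference only while an object is still live:
 *
 *	if (!atomic64_inc_not_zero(&hyp_obj->refcnt))
 *		return NULL;	// object already on its way to being freed
 */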

#endif	/* __KERNEL__ */
#endif	/* __ASM_ATOMIC_H */