/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

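/*
 * Example (illustrative only, not part of this header): a simple
 * reference count built on atomic_t, using helpers defined below.
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);			// take a reference
 *	if (atomic_dec_and_test(&refcount))
 *		release_object();		// hypothetical cleanup helper
 */
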
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
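/*
 * Illustrative sketch (not kernel code): each ldrex/strex loop below is
 * a read-modify-write retry loop, roughly:
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = old + i;
 *	} while (!store_exclusive(&v->counter, new));
 *
 * strex writes 0 to its status operand on success, so "teq %1, #0" /
 * "bne 1b" retries until the exclusive store wins.
 */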
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

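/*
 * The *_return variants are fully ordered: an smp_mb() is issued before
 * and after the exclusive load/store sequence.  The void variants
 * (atomic_add, atomic_sub) deliberately omit the barriers and give no
 * ordering guarantees.
 */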
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

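/*
 * atomic_cmpxchg() returns the value v->counter held before the
 * operation; the exchange happened iff the return value equals @old.
 * A failed strex (res != 0) only means we lost exclusivity, so the
 * outer loop re-reads and re-tests rather than reporting failure.
 */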
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

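/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so these builds are UP-only (see the
 * #error above) and atomicity is provided by briefly disabling interrupts
 * around a plain load/modify/store.
 */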
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

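/*
 * __atomic_add_unless() atomically adds @a to @v so long as @v was not
 * already @u, and returns the value @v held beforehand; callers compare
 * the result against @u to learn whether the add took place.
 */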
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

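/*
 * atomic_inc()/atomic_dec() are unordered on ARMv6+, so callers that
 * need ordering wrap them in these explicit full barriers.
 */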
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

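/*
 * With LPAE the architecture guarantees that ldrd/strd to a 64-bit
 * aligned location are single-copy atomic, so plain doubleword accesses
 * suffice for atomic64_read()/atomic64_set().  Without LPAE we must use
 * the exclusive doubleword accessors (ldrexd/strexd) instead.
 */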
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

static inline void atomic64_add(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

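/*
 * atomic64_dec_if_positive() stores the decremented value only when the
 * result is non-negative, but returns the decremented value either way:
 * a negative return means @v was left unmodified.
 */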
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

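/*
 * atomic64_add_unless() adds @a to @v unless @v already equals @u, and
 * returns non-zero iff the add was performed; atomic64_inc_not_zero()
 * below builds on it.
 */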
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */