/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

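/*
 * Illustrative usage sketch only (the counter and helper named here are
 * hypothetical, not defined in this file):
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&example_refs, 2);
 *	if (atomic_read(&example_refs) == 2)
 *		do_something();
 *
 * Note that atomic_set() is a plain store and atomic_read() a plain
 * volatile load; neither implies any memory barrier.
 */
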
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

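/*
 * A rough C-level view of the ldrex/strex retry loop used throughout this
 * file (purely illustrative; load_exclusive/store_exclusive are pseudo
 * helpers, not real functions):
 *
 *	do {
 *		result = load_exclusive(&v->counter);
 *		result = <new value computed from result and i>;
 *		failed = store_exclusive(&v->counter, result);
 *	} while (failed);
 *
 * strex writes 0 to its status register on success and 1 if the exclusive
 * monitor was lost, so the "teq %1, #0; bne 1b" tail retries until the
 * read-modify-write completed without interference.
 */
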
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

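/*
 * Note: the smp_mb() before and after the exclusive sequence make
 * atomic_add_return() (and the other *_return/cmpxchg routines below)
 * fully ordered, whereas the plain atomic_add()/atomic_sub() above imply
 * no barrier at all. Illustrative use (shared_seq is hypothetical):
 *
 *	my_ticket = atomic_add_return(1, &shared_seq);
 */
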
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

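/*
 * Roughly, ignoring atomicity and barriers, atomic_cmpxchg() implements
 * the usual compare-and-swap contract (illustrative sketch only):
 *
 *	int cmpxchg_like(atomic_t *ptr, int old, int new)
 *	{
 *		int cur = ptr->counter;
 *
 *		if (cur == old)
 *			ptr->counter = new;
 *		return cur;
 *	}
 *
 * The surrounding do/while retries only when strexeq itself failed, not
 * when the comparison failed.
 */
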
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}

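/*
 * Roughly, ignoring atomicity and barriers, __atomic_add_unless() adds
 * "a" unless the counter currently holds "u", and always returns the
 * value it observed (illustrative sketch only):
 *
 *	int add_unless_like(atomic_t *v, int a, int u)
 *	{
 *		int old = v->counter;
 *
 *		if (old != u)
 *			v->counter = old + a;
 *		return old;
 *	}
 *
 * The trailing smp_mb() is skipped when no store was performed.
 */
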
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

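/*
 * Illustrative usage sketch only (the structure and functions named here
 * are hypothetical, not defined in this file):
 *
 *	void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			example_free(obj);
 *	}
 *
 * atomic_dec_and_test() returns true only for the caller that drops the
 * count to zero, so only that caller frees the object.
 */
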
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

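/*
 * Since the non-return atomic_inc()/atomic_dec() imply no barrier here,
 * these helpers expand to a full smp_mb(). Illustrative use (obj->pending
 * is hypothetical):
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
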
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

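/*
 * Note: with CONFIG_ARM_LPAE, ldrd/strd to a naturally aligned 64-bit
 * location are single-copy atomic, so plain doubleword accesses suffice
 * for atomic64_read()/atomic64_set(). Without LPAE the same guarantee
 * needs the exclusive instructions, hence the ldrexd read and the
 * ldrexd/strexd loop for the store above.
 */
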
static inline void atomic64_add(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

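/*
 * Roughly, ignoring atomicity and barriers, atomic64_dec_if_positive()
 * only stores the decremented value when it did not go negative, and
 * returns the (possibly negative) decremented value either way
 * (illustrative sketch only):
 *
 *	long long dec_if_positive_like(atomic64_t *v)
 *	{
 *		long long new = v->counter - 1;
 *
 *		if (new >= 0)
 *			v->counter = new;
 *		return new;
 *	}
 */
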
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

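/*
 * Note: unlike the 32-bit __atomic_add_unless() above, which returns the
 * old value, atomic64_add_unless() returns 1 if the addition was
 * performed and 0 if the counter already held "u". atomic64_inc_not_zero()
 * below builds directly on it; illustrative use (obj->refcnt64 is
 * hypothetical):
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt64))
 *		return NULL;
 */
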
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif