/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
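/*
 * Purely illustrative usage sketch (not part of this header), assuming a
 * hypothetical 'refcount' counter:
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_set(&refcount, 0);
 *	pr_debug("count=%d\n", atomic_read(&refcount));
 */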

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

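/*
 * Rough C-level sketch of the ldrex/strex retry loop used below (for
 * illustration only; ldrex()/strex() stand in for the exclusive load/store
 * instructions and are not real functions):
 *
 *	do {
 *		old = ldrex(&v->counter);
 *		new = old <op> i;
 *	} while (strex(new, &v->counter));	// fails if exclusivity was lost
 */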
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
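/*
 * For illustration: the ATOMIC_OPS(add, +=, add) instantiation further down
 * expands the three templates above into atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed(), each built
 * around the same ldrex/strex retry loop.
 */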

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
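/*
 * Illustrative caller pattern (not part of this header): an increment built
 * on top of cmpxchg, assuming a hypothetical atomic_t v:
 *
 *	int old, seen;
 *
 *	old = atomic_read(&v);
 *	while ((seen = atomic_cmpxchg(&v, old, old + 1)) != old)
 *		old = seen;
 */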

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
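/*
 * Note: this returns the value observed before any addition; the generic
 * atomic_add_unless() wrapper in <linux/atomic.h> compares it with @u to
 * decide whether the add took place.  The trailing smp_mb() is only issued
 * when the counter was actually modified.
 */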

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
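/*
 * Pre-ARMv6 is UP only: atomicity with respect to interrupts is provided by
 * disabling IRQs around a plain read-modify-write sequence.
 */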

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
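/*
 * Fallback add-unless: loop on atomic_cmpxchg() until either the counter
 * equals @u (no add performed) or the cmpxchg succeeds; the value observed
 * before any addition is returned.
 */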

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)
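/*
 * For illustration: the bitwise instantiations above generate atomic_and(),
 * atomic_andnot(), atomic_or() and atomic_xor() plus their fetch_ variants;
 * no _return forms are provided for these operations.
 */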

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
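/*
 * The helpers above are built from the add/sub primitives; on ARMv6+ the
 * fully ordered *_return and fetch_* forms used here are constructed from
 * the _relaxed versions by the generic wrappers in <linux/atomic.h>.
 */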

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
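/*
 * With LPAE, ldrd/strd accesses to a naturally aligned 64-bit counter are
 * single-copy atomic, so the plain load and store above suffice; without
 * LPAE, ldrexd is needed for an atomic read and atomic64_set() must loop on
 * ldrexd/strexd so the store is seen atomically by other observers.
 */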

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long							\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
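/*
 * 64-bit add/sub operate on the two 32-bit halves: adds/subs set the carry
 * from the low word and adc/sbc fold it into the high word.
 */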

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

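/*
 * 64-bit compare-and-exchange: both 32-bit halves are compared (teq/teqeq)
 * before strexdeq attempts the conditional store of the new value.
 */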
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

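/*
 * Decrement and return the new value, but skip the store (leaving the
 * counter untouched) when the result would go negative; callers test the
 * returned value for < 0.
 */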
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

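/*
 * Returns nonzero when the add was performed (i.e. the counter did not equal
 * @u), zero otherwise; as with the 32-bit variant, the trailing barrier is
 * only issued when the counter was modified.
 */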
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif