/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
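
/*
 * Illustrative usage (a sketch, not part of the original file):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);
 *	if (atomic_read(&refs) == 2)
 *		...;
 *
 * READ_ONCE()/WRITE_ONCE() only stop the compiler from tearing, caching
 * or refetching the plain access; they imply no CPU memory barrier.
 */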

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

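/*
 * Illustrative expansion (a sketch, not generated output):
 * ATOMIC_OPS(add, +=, add) further down turns ATOMIC_OP into roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *	1:	ldrex	result, [&v->counter]	@ load-exclusive
 *		add	result, result, i	@ the asm_op step
 *		strex	tmp, result, [&v->counter] @ fails if monitor lost
 *		teq	tmp, #0
 *		bne	1b			@ retry until strex succeeds
 *	}
 *
 * i.e. the classic load-exclusive/store-exclusive retry loop.
 */
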
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
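
/*
 * In ATOMIC_FETCH_OP, %0 (result) keeps the value loaded by ldrex --
 * i.e. the old value, which is what gets returned -- while %1 (val)
 * holds the updated value that strex writes back.
 */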

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
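
/*
 * Defining only the _relaxed forms is enough here: the generic code in
 * <linux/atomic.h> sees these #defines and builds the acquire, release
 * and fully-ordered variants from them by wrapping the relaxed op in
 * the appropriate smp_mb()-based barriers.
 */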

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
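
/*
 * Note that the do/while above only retries when strexeq itself fails
 * (res != 0, i.e. the exclusive monitor was lost); a plain compare
 * mismatch leaves res at 0 and returns the unexpected old value.
 *
 * Typical caller-side usage (an illustrative sketch, not from this
 * file):
 *
 *	int cur = atomic_read(v);
 *	int seen;
 *
 *	for (;;) {
 *		seen = atomic_cmpxchg_relaxed(v, cur, cur + 1);
 *		if (seen == cur)
 *			break;		-- cmpxchg won
 *		cur = seen;		-- lost the race, retry
 *	}
 */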

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
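
/*
 * atomic_fetch_add_unless() must be fully ordered when it performs the
 * add, hence the unconditional smp_mb() before the loop and the second
 * smp_mb() afterwards. The trailing barrier is skipped when the value
 * already equalled @u, since no update happened in that case.
 */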

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
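
/*
 * On a uniprocessor pre-ARMv6 system (enforced by the #error above),
 * briefly disabling interrupts is all that is needed: nothing else can
 * observe or modify v->counter between the irq_save and irq_restore,
 * so the plain C read-modify-write sequences are effectively atomic.
 */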

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
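
/*
 * atomic_xchg() simply forwards to the generic xchg() from
 * <asm/cmpxchg.h>, operating directly on the embedded counter.
 */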

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
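/*
 * With LPAE the architecture guarantees that 64-bit ldrd/strd to a
 * naturally aligned location are single-copy atomic, so plain loads
 * and stores suffice for atomic64_read()/atomic64_set().
 */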
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

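/*
 * Without LPAE a plain strd is not guaranteed atomic, so the store has
 * to go through an ldrexd/strexd loop: strexd only succeeds while the
 * exclusive monitor is held, which makes the 64-bit store indivisible
 * with respect to the other ldrexd/strexd-based operations below.
 */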
static inline void atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

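/*
 * The %Q, %R and %H operand modifiers pick apart a 64-bit register
 * pair: %Q is the register holding the low 32 bits, %R the one holding
 * the high 32 bits, and %H names the partner register of %0 as needed
 * by the ldrexd/strexd syntax. Each 64-bit op is therefore op1 on the
 * low words followed by op2 on the high words.
 */
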
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)			\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
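
/*
 * For add/sub the low-word instruction must set the carry flag (adds,
 * subs) so that the high-word instruction (adc, sbc) can propagate the
 * carry/borrow across the two halves of the 64-bit value. The bitwise
 * ops below need no such pairing and use the same mnemonic twice.
 */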

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
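
/*
 * The 64-bit compare takes two steps: teq checks the low words, and
 * teqeq, which only executes if the first compare matched, checks the
 * high words. strexdeq then stores the new value only when both words
 * of the old value matched.
 */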

static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
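
/*
 * atomic64_dec_if_positive() always returns the decremented value, but
 * only stores it when it is non-negative: "teq %R0, #0" transfers the
 * sign bit of the high word into the N flag, and "bmi 2f" skips the
 * strexd when the result went negative.
 */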

static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */