/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

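/*
 * Each template below generates one family of ops when instantiated by
 * ATOMIC_OPS() further down:
 *
 *   ATOMIC_OP(op, ...)        - arch_atomic_<op>(), returns nothing
 *   ATOMIC_OP_RETURN(op, ...) - arch_atomic_<op>_return_relaxed(), returns
 *                               the new value
 *   ATOMIC_FETCH_OP(op, ...)  - arch_atomic_fetch_<op>_relaxed(), returns
 *                               the old value
 *
 * Only the _relaxed return/fetch forms are defined here; the generic atomic
 * headers derive the acquire/release/fully ordered variants from them.
 */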
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

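/*
 * cmpxchg: store @new in *ptr only if the current value equals @old, and
 * return whatever value was observed.  If the comparison fails, %0 (res)
 * stays 0 and the loop exits at once; if it matches, the loop retries until
 * strexeq reports a successful store.
 */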
static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

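/*
 * Add @a to @v unless @v already holds @u; returns the old value.  This op
 * must be fully ordered, so the barriers are issued by hand: smp_mb() before
 * the loop, and another afterwards only if the counter was actually changed.
 */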
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

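/*
 * Pre-ARMv6 cores have no ldrex/strex, so these builds are UP only (enforced
 * by the #error above) and atomicity against interrupt handlers is obtained
 * by briefly disabling interrupts around plain C read-modify-write sequences.
 */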
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

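/*
 * Instantiate the templates.  For example, ATOMIC_OPS(add, +=, add) expands
 * into arch_atomic_add(), arch_atomic_add_return[_relaxed]() and
 * arch_atomic_fetch_add[_relaxed](), using whichever template set (ldrex/strex
 * or irq-disabling) was selected above.
 */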
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

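/*
 * 64-bit atomics, implemented with ldrexd/strexd (available from ARMv6K on).
 * Platforms without the 64-bit exclusives select CONFIG_GENERIC_ATOMIC64 and
 * fall back to the spinlock-based implementation in lib/atomic64.c, in which
 * case everything below is compiled out.
 */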
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

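/*
 * With LPAE the architecture guarantees that ldrd/strd to a 64-bit aligned
 * address are single-copy atomic, so plain doubleword loads and stores are
 * enough for read/set.  Without LPAE, ldrexd (and a dummy strexd loop for
 * set) is needed to get an atomic 64-bit access.
 */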
#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

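/*
 * 64-bit op templates.  The value lives in a register pair: %Q0 names the
 * register holding the low 32 bits, %R0 the one holding the high 32 bits,
 * and %H0 the second register of the pair as ldrexd/strexd expect it to be
 * written.  Add/sub are done as a low-word op that sets carry followed by a
 * high-word op that consumes it (adds/adc, subs/sbc).
 */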
#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

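/* Unconditional 64-bit exchange: retry until strexd succeeds, return the old value. */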
static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed

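/*
 * Decrement @v and keep the result only if it did not go negative (bmi skips
 * the store when the sign bit of the high word is set).  The return value is
 * always the decremented value, so a negative result tells the caller that
 * the counter was left untouched.
 */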
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

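/*
 * 64-bit counterpart of arch_atomic_fetch_add_unless() above: add @a unless
 * the counter equals @u, returning the old value, with the same explicit
 * barrier placement.
 */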
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */