xref: /openbmc/linux/arch/powerpc/include/asm/atomic.h (revision 160b8e75)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
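
/*
 * Illustrative sketch (not part of this header): the generic atomic code
 * builds the acquire/release form of each operation from its _relaxed
 * variant using the two wrappers above.  The hypothetical helper below
 * shows the shape of such a composition, using xchg_relaxed() from
 * asm/cmpxchg.h on an atomic_t's counter.
 */
static __inline__ int example_atomic_xchg_acquire(atomic_t *v, int new)
{
	/* run the relaxed op, then have the wrapper append the acquire barrier */
	return __atomic_op_acquire(xchg, &v->counter, new);
}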

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
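
/*
 * Illustrative usage (not part of this header): atomic_read() and
 * atomic_set() are plain single-word loads and stores with no implied
 * memory ordering.  A hypothetical poll of a counter could look like this:
 */
static __inline__ bool example_counter_drained(const atomic_t *cnt)
{
	return atomic_read(cnt) == 0;	/* one lwz; callers add any barriers */
}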

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
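
/*
 * Illustrative sketch (not part of this header): once the macros above are
 * expanded, atomic_add_return_relaxed() yields the new value while
 * atomic_fetch_add_relaxed() yields the value seen before the addition.
 * The hypothetical helper below bumps a counter and reports whether the
 * caller was the first to do so.
 */
static __inline__ bool example_first_incrementer(atomic_t *stat)
{
	/* an old value of zero means nobody had incremented it before us */
	return atomic_fetch_add_relaxed(1, stat) == 0;
}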

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
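
/*
 * Illustrative sketch (not part of this header): atomic_cmpxchg() is the
 * usual building block for compound updates that have no dedicated
 * lwarx/stwcx. sequence of their own.  The hypothetical helper below
 * increments a counter but saturates it at a caller-supplied limit.
 */
static __inline__ int example_atomic_inc_saturating(atomic_t *v, int limit)
{
	int old, cur = atomic_read(v);

	while (cur < limit) {
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)		/* our update won the race */
			break;
		cur = old;		/* someone else changed it; retry */
	}
	return cur;
}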

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
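
/*
 * Illustrative sketch (not part of this header): the generic
 * atomic_add_unless() wrapper is defined in terms of __atomic_add_unless().
 * A hypothetical open-coded user, taking a reference only while the count
 * is below some ceiling, could look like this:
 */
static __inline__ bool example_get_ref_below(atomic_t *refs, int ceiling)
{
	/* adds 1 unless the counter currently equals @ceiling */
	return __atomic_add_unless(refs, 1, ceiling) != ceiling;
}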

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
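
/*
 * Illustrative usage (not part of this header): atomic_inc_not_zero() is
 * the classic way to take a new reference on an object whose count
 * reaching zero means "on its way to being freed".  A hypothetical lookup
 * path might do:
 */
static __inline__ bool example_tryget(atomic_t *refcount)
{
	/* fails (returns zero) once the last reference has been dropped */
	return atomic_inc_not_zero(refcount) != 0;
}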

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
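
/*
 * Illustrative usage (not part of this header): atomic_dec_if_positive()
 * suits credit/token schemes.  The hypothetical helper below consumes a
 * token when one is available and reports whether it got one.
 */
static __inline__ bool example_take_token(atomic_t *tokens)
{
	/* the result is the old value minus 1; negative means nothing taken */
	return atomic_dec_if_positive(tokens) >= 0;
}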

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */