xref: /openbmc/linux/arch/powerpc/include/asm/atomic.h (revision 55b37d9c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

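/*
 * Illustrative sketch, not part of this header: the generic atomic
 * layer combines the fences above with the _relaxed primitives to
 * build the acquire/release variants, roughly as in
 * include/linux/atomic/atomic-arch-fallback.h:
 *
 *	static __always_inline int
 *	arch_atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */
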
static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

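/*
 * No lwarx/stwcx. is needed above: an aligned word-sized load or
 * store is already single-copy atomic on powerpc.  The "%U1%X1"
 * modifiers let the compiler pick update- or indexed-form mnemonics
 * to match the "m<>" memory operand, so the read may assemble to
 * e.g. "lwz r9,0(r3)" or "lwzx r9,r3,r4" (illustrative encodings).
 */
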
#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")

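/*
 * For reference, a rough post-preprocessing view of what
 * ATOMIC_OPS(add, add, "c", I, "xer") generates for the void variant
 * (illustrative only).  "%I2" prints "i" when operand 2 is a
 * constant, so the middle instruction assembles as addic for
 * immediates and addc for registers; both write the XER carry bit,
 * hence the "xer" clobber.
 *
 *	static __inline__ void arch_atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add%I2c	%0,%0,%2\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "rI" (a), "r" (&v->counter)
 *		: "cc", "xer");
 *	}
 */
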
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)

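/*
 * Constraint notes for the bitwise ops above: "K" accepts an unsigned
 * 16-bit immediate (vs. the signed "I" used for add/sub), matching
 * the andi./ori/xori encodings.  Only "and" passes the "." suffix,
 * because its immediate form is "andi.", which always sets CR0; that
 * is already covered by the "cc" clobber in every ATOMIC_OP variant.
 */
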
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed  arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define arch_atomic_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

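/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * classic compare-and-swap retry loop built on the wrappers above.
 *
 *	static inline void example_add_capped(atomic_t *v, int cap)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old < cap) {
 *			int prev = arch_atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				break;		// won the race
 *			old = prev;		// lost; retry with fresh value
 *		}
 *	}
 */
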
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add%I2c	%0,%0,%2 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

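/*
 * Usage sketch (hypothetical): this is the primitive behind generic
 * helpers such as atomic_add_unless() and atomic_inc_not_zero(),
 * e.g. a take-a-reference-unless-dead pattern:
 *
 *	static inline bool example_get_ref(atomic_t *refs)
 *	{
 *		return arch_atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */
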
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

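/*
 * Usage sketch (hypothetical): dec_if_positive() gives a
 * semaphore-style "try to take one".  Because the return value is
 * old - 1 even when the store is skipped, a negative result means
 * nothing was taken.
 *
 *	static inline bool example_try_take(atomic_t *avail)
 *	{
 *		return arch_atomic_dec_if_positive(avail) >= 0;
 *	}
 */
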
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

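/*
 * Note on ATOMIC64_OPS(sub, subf) above: "subf rt,ra,rb" computes
 * rb - ra, so the expanded "subf %0,%2,%0" yields counter - a as
 * intended.  Unlike the 32-bit paths, the 64-bit operand is
 * register-only (no "%I" immediate trick), and the "xer" clobber is
 * needed only by the addic-based helpers further down.
 */
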
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#define arch_atomic64_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic64_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))

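/*
 * Usage sketch (hypothetical): the lookup-then-pin pattern this
 * primitive exists for; only take a reference if the refcount has
 * not already dropped to zero.
 *
 *	static inline bool example_tryget64(atomic64_t *refs)
 *	{
 *		return arch_atomic64_inc_not_zero(refs);
 *	}
 */
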
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */