/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

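/*
 * For illustration, the generic fallback layer (via <linux/atomic.h>)
 * builds the ordered forms from the _relaxed ones, roughly:
 *
 *	ret = arch_atomic_add_return_relaxed(i, v);
 *	__atomic_acquire_fence();
 *	return ret;
 *
 * Since every _relaxed variant below already ends in smp_mb(), the two
 * fences here can safely be defined to nothing.
 */
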
#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

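/*
 * All of the loops below follow the same load-locked/store-conditional
 * pattern: ld{l,q}_l loads v->counter and sets the lock flag, the new
 * value is computed in registers, and st{l,q}_c stores it only if the
 * location has not been written to in the meantime.  On failure the
 * st{l,q}_c leaves 0 in its source register and the beq retries via
 * .subsection 2, per the comment above.
 *
 * ATOMIC_OP() generates the void, unordered arch_atomic_<op>() ops.
 */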
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

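/*
 * ATOMIC_OP_RETURN() generates arch_atomic_<op>_return_relaxed(), which
 * returns the new value.  The trailing smp_mb() keeps dependency
 * ordering intact, per the comment at the top of this file.
 */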
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

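/*
 * ATOMIC_FETCH_OP() generates arch_atomic_fetch_<op>_relaxed(), which
 * returns the old value instead of the new one.
 */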
#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

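/*
 * The 64-bit variants mirror the 32-bit ones above, operating on s64
 * with the quadword ldq_l/stq_c instructions.
 */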
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
{									\
	s64 temp;							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)		\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)		\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

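/*
 * Instantiate all six variants for the arithmetic ops, pairing each op
 * with its longword (addl/subl) and quadword (addq/subq) instruction.
 */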
#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

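/*
 * Defining a generic operation to itself tells the fallback layer in
 * <linux/atomic.h> that this architecture provides it directly, so no
 * generic fallback is generated for it.
 */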
#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot			arch_atomic_andnot
#define arch_atomic64_andnot			arch_atomic64_andnot

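/*
 * The bitwise ops (and, andnot, or, xor) have no <op>_return form, so
 * ATOMIC_OPS is redefined without the *_OP_RETURN variants.  Note the
 * Alpha mnemonics: andnot is "bic" and or is "bis".
 */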
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

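/*
 * Atomically add @a to @v, unless @v was @u, and return the old value;
 * the caller compares the result against @u to see whether the add
 * happened.  Fully ordered: smp_mb() on both sides of the ll/sc loop.
 */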
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

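/* The 64-bit counterpart, identical but for the quadword instructions. */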
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

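/*
 * Atomically decrement @v if it is strictly positive, and return the
 * old value minus one in either case; a negative result tells the
 * caller that no decrement took place.  Fully ordered.
 */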
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */