xref: /openbmc/linux/arch/ia64/include/asm/atomic.h (revision 24e2d05d)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "s64" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
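/*
 * The ATOMIC_OP()/ATOMIC_FETCH_OP() templates below build the 32-bit
 * atomics from a compare-and-exchange retry loop: read the current
 * counter, compute the new value, and try to install it with an
 * acquire-semantics cmpxchg; if another CPU changed the counter in the
 * meantime the cmpxchg fails and the loop retries.  ATOMIC_OP() returns
 * the new value, ATOMIC_FETCH_OP() returns the value seen before the
 * update.
 */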
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)
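/*
 * ATOMIC_OPS(add, +) expands to ia64_atomic_add() and
 * ia64_atomic_fetch_add(); ATOMIC_OPS(sub, -) likewise generates the
 * sub variants.
 */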
ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
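/*
 * __ia64_atomic_const(i) is 1 only when "i" is a compile-time constant
 * that the ia64 fetchadd4/fetchadd8 instructions can encode as an
 * immediate increment (+/-1, 4, 8 or 16); only then can the
 * single-instruction fetchadd fast path replace the cmpxchg loop.  The
 * check is attempted only when optimizing (__OPTIMIZE__), presumably
 * because __builtin_constant_p() is only useful once the optimizer
 * folds constants.
 */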
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif
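/*
 * arch_atomic_add_return()/arch_atomic_sub_return() return the updated
 * value, while arch_atomic_fetch_add()/arch_atomic_fetch_sub() return
 * the value the counter held before the update.  When the increment is
 * one of the constants fetchadd accepts, the intrinsic fast path is
 * taken (e.g. arch_atomic_add_return(4, v)); otherwise the cmpxchg-loop
 * helpers above are used (e.g. arch_atomic_add_return(5, v)).
 */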
#define arch_atomic_add_return(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define arch_atomic_sub_return(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define arch_atomic_fetch_add(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic_fetch_sub(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
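/*
 * There is no fetchadd-style shortcut for and/or/xor, so these always
 * go through the cmpxchg loop; the non-fetch variants simply discard
 * the returned old value.
 */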
#define arch_atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define arch_atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define arch_atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define arch_atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define arch_atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define arch_atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
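/*
 * The 64-bit implementation mirrors the 32-bit one above, operating on
 * s64/atomic64_t and using an 8-byte cmpxchg.
 */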
#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define arch_atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define arch_atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define arch_atomic64_fetch_add(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic64_fetch_sub(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define arch_atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define arch_atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
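/*
 * xchg()/cmpxchg() on an atomic simply forward to the generic
 * arch_xchg()/arch_cmpxchg() primitives applied to the counter field.
 */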
#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
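/*
 * Plain add/sub are the *_return forms with the result discarded.
 */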
#define arch_atomic_add(i,v)		(void)arch_atomic_add_return((i), (v))
#define arch_atomic_sub(i,v)		(void)arch_atomic_sub_return((i), (v))

#define arch_atomic64_add(i,v)		(void)arch_atomic64_add_return((i), (v))
#define arch_atomic64_sub(i,v)		(void)arch_atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */