xref: /openbmc/linux/arch/ia64/include/asm/atomic.h (revision ddc141e5)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

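/*
 * Static initializers for atomic_t and atomic64_t, e.g.
 *	static atomic_t refcount = ATOMIC_INIT(1);
 */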
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

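/*
 * ATOMIC_OP/ATOMIC_FETCH_OP generate the generic fallback: read the
 * counter, compute the new value, then retry with an acquire cmpxchg
 * until no other CPU modified the counter in between.  ATOMIC_OP
 * returns the new value, ATOMIC_FETCH_OP returns the old one.
 */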
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

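/*
 * The ia64 fetchadd instruction can only add an immediate from the set
 * {-16, -8, -4, -1, 1, 4, 8, 16}.  When the increment is a compile-time
 * constant in that set we can use fetchadd directly; otherwise we fall
 * back to the cmpxchg loops generated above.
 */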
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)	__builtin_constant_p(i) ?		\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0

#define atomic_add_return(i, v)						\
({									\
	int __i = (i);							\
	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
	__ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :	\
				ia64_atomic_add(__i, v);		\
})

#define atomic_sub_return(i, v)						\
({									\
	int __i = (i);							\
	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
	__ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :	\
				ia64_atomic_sub(__i, v);		\
})
#else
#define atomic_add_return(i, v)	ia64_atomic_add(i, v)
#define atomic_sub_return(i, v)	ia64_atomic_sub(i, v)
#endif

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

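/*
 * fetchadd can only add, so the bitwise operations always go through
 * the cmpxchg loop.
 */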
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

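/*
 * The 64-bit variants below mirror the 32-bit ones above, operating on
 * atomic64_t via 8-byte cmpxchg/fetchadd.
 */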
#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ long							\
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

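/*
 * Exchange and compare-and-exchange simply apply the generic xchg()
 * and cmpxchg() to the counter word.
 */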
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

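/*
 * __atomic_add_unless(): add @a to @v unless @v is already @u.
 * Returns the old value of @v; the add happened iff that value
 * differs from @u.
 */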
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

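/*
 * atomic64_add_unless(): same idea as above, but note the different
 * return value: non-zero if the add was performed, 0 if @v was already
 * @u.  This is what makes atomic64_inc_not_zero() below work.
 */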
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

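/*
 * atomic64_dec_if_positive(): decrement @v only if the result would
 * not go negative.  Returns the value @v would have been decremented
 * to, so a negative return means no decrement took place.
 */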
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

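/*
 * Typical usage (illustrative sketch only, not part of this header;
 * "refcount" and "release_resource" are hypothetical names):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);
 *	if (atomic_dec_and_test(&refcount))
 *		release_resource();
 *
 * atomic_dec_and_test() returns true only for the caller that drops the
 * count to zero, so release_resource() runs exactly once.
 */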

#endif /* _ASM_IA64_ATOMIC_H */