xref: /openbmc/linux/arch/ia64/include/asm/atomic.h (revision bc5aa3a0)
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
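
/*
 * Note: atomic_read()/atomic_set() are plain READ_ONCE()/WRITE_ONCE()
 * accesses of the counter field.  They make the load/store itself
 * tear-free but imply no memory ordering; e.g. atomic_set(v, 0) is just
 * a volatile store of 0 to v->counter.
 */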

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}
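
/*
 * Both generators emit a compare-and-exchange retry loop: read the
 * current counter, compute "old c_op i", then try to install the result
 * with an acquire-ordered ia64_cmpxchg().  If another CPU modified the
 * counter in the meantime, the cmpxchg returns a value != old and the
 * loop retries.  ATOMIC_OP() returns the new value, ATOMIC_FETCH_OP()
 * the old one, so e.g. ia64_atomic_fetch_add(i, v) behaves like
 * "v->counter += i" while handing back the pre-update value.
 */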

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
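
/*
 * The two instantiations above provide ia64_atomic_add()/ia64_atomic_sub()
 * (returning the new value) and ia64_atomic_fetch_add()/
 * ia64_atomic_fetch_sub() (returning the old value).  They act as the
 * slow path for the wrappers below whenever the operand is not a
 * fetchadd-compatible constant.
 */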

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})
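
/*
 * Fast path: the ia64 fetchadd instruction only accepts the immediates
 * -16, -8, -4, -1, 1, 4, 8 and 16, so when "i" is a compile-time constant
 * from that set, ia64_fetch_and_add() is used directly; any other operand
 * falls back to the cmpxchg loop in ia64_atomic_add().  For example,
 * atomic_add_return(4, v) compiles down to a single fetchadd, while
 * atomic_add_return(n, v) with a runtime "n" takes the loop.
 */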

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})
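
/*
 * The _return variants above hand back the value after the update (via
 * ia64_fetch_and_add), whereas the _fetch variants hand back the value
 * before it (ia64_fetchadd with acquire semantics).  Subtraction reuses
 * the add fast path by negating the constant, e.g. atomic_fetch_sub(8, v)
 * becomes ia64_fetchadd(-8, &(v)->counter, acq).
 */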

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
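
/*
 * There is no fetchadd-style shortcut for the bitwise operations, so
 * and/or/xor always go through the cmpxchg loop.  The void-returning
 * forms (atomic_and() etc.) simply discard the old value returned by the
 * corresponding ia64_atomic_fetch_*() helper.
 */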

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ long							\
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
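
/*
 * The 64-bit generators mirror the 32-bit ones exactly, operating on
 * __s64/atomic64_t and passing sizeof(atomic64_t) to ia64_cmpxchg so the
 * 8-byte form of the compare-and-exchange is used.
 */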

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
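
/*
 * atomic_cmpxchg()/atomic_xchg() and their 64-bit counterparts simply
 * forward to the generic cmpxchg()/xchg() on the counter field.  A
 * typical use is a hand-rolled read-modify-write loop, sketched here
 * with hypothetical old/prev/flag variables:
 *
 *	old = atomic_read(v);
 *	while ((prev = atomic_cmpxchg(v, old, old | flag)) != old)
 *		old = prev;
 */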

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
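
/*
 * __atomic_add_unless() adds "a" to *v unless the counter currently
 * equals "u", and returns the value the counter had before the attempt.
 * Callers can therefore tell whether the add happened by comparing the
 * return value with "u"; the generic atomic_add_unless() and
 * atomic_inc_not_zero() wrappers in <linux/atomic.h> rely on exactly
 * that.
 */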


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
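
/*
 * Unlike the 32-bit helper, atomic64_add_unless() returns whether the
 * addition actually took place (non-zero iff the counter was not "u"),
 * so atomic64_inc_not_zero(v) reads as "increment v unless it is zero"
 * and reports success directly.
 */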

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
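
/*
 * The remaining helpers are thin wrappers: inc/dec are add/sub of 1, the
 * *_and_test() forms compare the returned value with zero, and the
 * void-returning add/sub/inc/dec just discard the result of the
 * corresponding *_return() operation, e.g. atomic_inc(v) expands to
 * (void)atomic_add_return(1, (v)).
 */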

#endif /* _ASM_IA64_ATOMIC_H */