xref: /openbmc/linux/arch/s390/include/asm/atomic.h (revision 7dd65feb)
1 #ifndef __ARCH_S390_ATOMIC__
2 #define __ARCH_S390_ATOMIC__
3 
4 /*
5  * Copyright 1999,2009 IBM Corp.
6  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7  *	      Denis Joseph Barrow,
8  *	      Arnd Bergmann <arndb@de.ibm.com>,
9  *
10  * Atomic operations that C can't guarantee us.
11  * Useful for resource counting etc.
12  * s390 uses 'Compare And Swap' for atomicity in SMP environment.
13  *
14  */
15 
16 #include <linux/compiler.h>
17 #include <linux/types.h>
18 
19 #define ATOMIC_INIT(i)  { (i) }
20 
21 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
22 
/*
 * Compare-and-swap retry loop for 32 bit counters: load the current
 * value, apply op_string (e.g. "ar") with op_val into a scratch
 * register, then attempt CS; CS sets the condition code "low" and
 * reloads %0 when another CPU changed the counter, in which case we
 * retry.  Evaluates to the new counter value.
 *
 * This variant uses the "Q" machine constraint (memory operand with
 * short displacement), only understood by gcc > 3.2.
 */
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val),	 "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})
37 
38 #else /* __GNUC__ */
39 
/*
 * Same compare-and-swap retry loop as above, for gcc <= 3.2 which
 * lacks the "Q" constraint: the counter is addressed explicitly as
 * 0(%3) with the pointer in an address register ("a" constraint).
 */
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})
55 
56 #endif /* __GNUC__ */
57 
/*
 * Read the counter.  barrier() is a compiler barrier only: it forces
 * the load to be performed here instead of reusing a value cached
 * across earlier code.  No CPU barrier is emitted.
 */
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}
63 
/*
 * Set the counter to i.  The trailing barrier() keeps the compiler
 * from sinking the store past subsequent code (mirror image of the
 * barrier in atomic_read()).
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}
69 
/* Atomically add i to *v and return the new value ("ar" = add register). */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
/* The add/inc family is derived entirely from atomic_add_return. */
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
79 
/* Atomically subtract i from *v and return the new value ("sr" = subtract). */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
/* The sub/dec family is derived entirely from atomic_sub_return. */
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
89 
/*
 * Atomically clear the bits set in mask: v->counter &= ~mask.
 * The complement is taken here; "nr" then ANDs it in.
 */
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}
94 
/* Atomically set the bits in mask: v->counter |= mask ("or" = OR). */
static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}
99 
/* Unconditionally exchange the counter; returns the previous value. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * Compare-and-swap: if the counter equals old, store new.  Returns the
 * counter value observed before the operation; callers test it against
 * old to see whether the swap took place.  A single CS instruction
 * suffices — on mismatch CS reloads %0 ("+d" (old)) with the current
 * value, which is exactly what we return.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	/* gcc > 3.2: "Q" short-displacement memory operand. */
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	/* Older gcc: explicit base register addressing. */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
119 
120 static inline int atomic_add_unless(atomic_t *v, int a, int u)
121 {
122 	int c, old;
123 	c = atomic_read(v);
124 	for (;;) {
125 		if (unlikely(c == u))
126 			break;
127 		old = atomic_cmpxchg(v, c, c + a);
128 		if (likely(old == c))
129 			break;
130 		c = old;
131 	}
132 	return c != u;
133 }
134 
135 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
136 
137 #undef __CS_LOOP
138 
139 #define ATOMIC64_INIT(i)  { (i) }
140 
141 #ifdef CONFIG_64BIT
142 
143 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
144 
/*
 * 64 bit compare-and-swap retry loop (CONFIG_64BIT), "Q"-constraint
 * variant for gcc > 3.2: load the counter, apply op_string (e.g.
 * "agr") with op_val, retry CSG until no concurrent update intervened.
 * Evaluates to the new counter value.
 *
 * Fix: the memory operands must describe the full 64 bit counter, so
 * the pointer is cast to atomic64_t — not atomic_t, whose counter is
 * only an int.  With the old cast the constraints covered just 4 of
 * the 8 bytes that CSG reads and writes, which can mislead the
 * compiler about what memory the asm touches.
 */
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val),	"Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})
159 
160 #else /* __GNUC__ */
161 
/*
 * Same 64 bit compare-and-swap retry loop, for gcc <= 3.2 without the
 * "Q" constraint: the counter is addressed as 0(%3) with the pointer
 * in an address register.
 *
 * Fix: cast to atomic64_t, not atomic_t, so the "m" operands describe
 * the full 8 byte counter accessed by CSG rather than just its first
 * 4 bytes.
 */
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic64_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic64_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})
177 
178 #endif /* __GNUC__ */
179 
/*
 * Read the 64 bit counter.  barrier() is a compiler barrier only,
 * preventing reuse of a previously loaded value (see atomic_read()).
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}
185 
/*
 * Set the 64 bit counter to i; the barrier() keeps the compiler from
 * deferring the store past later code (see atomic_set()).
 */
static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}
191 
/* Atomically add i to *v and return the new value ("agr" = 64 bit add). */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}
196 
/* Atomically subtract i from *v and return the new value ("sgr"). */
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}
201 
/*
 * Atomically clear the bits set in mask: v->counter &= ~mask
 * ("ngr" = 64 bit AND with the complemented mask).
 */
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}
206 
/* Atomically set the bits in mask: v->counter |= mask ("ogr" = 64 bit OR). */
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}
211 
/* Unconditionally exchange the 64 bit counter; returns the old value. */
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * 64 bit compare-and-swap: if *v == old, store new.  Returns the
 * counter value observed before the operation.  CSG reloads %0
 * ("+d" (old)) with the current value on mismatch, which is what we
 * hand back to the caller.
 */
static inline long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	/* gcc > 3.2: "Q" short-displacement memory operand. */
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	/* Older gcc: explicit base register addressing. */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
232 
233 #undef __CSG_LOOP
234 
235 #else /* CONFIG_64BIT */
236 
/*
 * 31 bit kernels have no native 64 bit atomic instructions on single
 * registers; atomic64_t is emulated below with register pairs and the
 * LM/STM/CDS instructions.
 */
typedef struct {
	long long counter;
} atomic64_t;
240 
/*
 * Read the 64 bit counter on a 31 bit kernel: LM loads both 32 bit
 * halves into an even/odd register pair with a single instruction.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,0(%1)"
		: "=&d" (rp)
		: "a" (&v->counter), "m" (v->counter)
		);
	return rp.pair;
}
252 
/*
 * Store a 64 bit value on a 31 bit kernel: STM writes both halves of
 * the register pair with a single instruction.
 */
static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,0(%2)"
		: "=m" (v->counter)
		: "d" (rp), "a" (&v->counter)
		);
}
263 
/*
 * Exchange the 64 bit counter on a 31 bit kernel: read the current
 * value with LM, then retry CDS (compare double and swap) until the
 * store succeeds; rp_old ends up holding the value that was replaced.
 */
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,0(%2)\n"
		"0:	cds	%0,%3,0(%2)\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "+m" (v->counter)
		: "a" (&v->counter), "d" (rp_new)
		: "cc");
	return rp_old.pair;
}
278 
/*
 * 64 bit compare-and-swap on a 31 bit kernel via CDS.  Returns the
 * counter value observed before the operation; on mismatch CDS
 * reloads rp_old with the current contents.
 */
static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%3,0(%2)"
		: "+&d" (rp_old), "+m" (v->counter)
		: "a" (&v->counter), "d" (rp_new)
		: "cc");
	return rp_old.pair;
}
292 
293 
294 static inline long long atomic64_add_return(long long i, atomic64_t *v)
295 {
296 	long long old, new;
297 
298 	do {
299 		old = atomic64_read(v);
300 		new = old + i;
301 	} while (atomic64_cmpxchg(v, old, new) != old);
302 	return new;
303 }
304 
305 static inline long long atomic64_sub_return(long long i, atomic64_t *v)
306 {
307 	long long old, new;
308 
309 	do {
310 		old = atomic64_read(v);
311 		new = old - i;
312 	} while (atomic64_cmpxchg(v, old, new) != old);
313 	return new;
314 }
315 
316 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
317 {
318 	long long old, new;
319 
320 	do {
321 		old = atomic64_read(v);
322 		new = old | mask;
323 	} while (atomic64_cmpxchg(v, old, new) != old);
324 }
325 
326 static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
327 {
328 	long long old, new;
329 
330 	do {
331 		old = atomic64_read(v);
332 		new = old & mask;
333 	} while (atomic64_cmpxchg(v, old, new) != old);
334 }
335 
336 #endif /* CONFIG_64BIT */
337 
338 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
339 {
340 	long long c, old;
341 	c = atomic64_read(v);
342 	for (;;) {
343 		if (unlikely(c == u))
344 			break;
345 		old = atomic64_cmpxchg(v, c, c + a);
346 		if (likely(old == c))
347 			break;
348 		c = old;
349 	}
350 	return c != u;
351 }
352 
/* The atomic64 add/inc/sub/dec family, derived from the *_return ops. */
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
364 
/*
 * Ordering hooks around atomic_inc/atomic_dec.  Defined as full
 * smp_mb() here; generic code calls these around the bare inc/dec
 * operations when it needs barrier semantics.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
369 
370 #include <asm-generic/atomic-long.h>
371 
372 #endif /* __ARCH_S390_ATOMIC__  */
373