#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)  { (i) }

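/*
 * __CS_LOOP implements an interlocked read-modify-write on a 32-bit
 * counter: load the old value, apply @op_string (a 32-bit register-
 * register instruction such as "ar", "sr", "nr" or "or") with @op_val,
 * and try to commit the result with COMPARE AND SWAP (CS).  If another
 * CPU changed the counter in the meantime, CS reloads the current value
 * and the "jl" branch retries.  The macro evaluates to the new value.
 * Roughly equivalent C, as a sketch only:
 *
 *	do {
 *		old = v->counter;
 *		new = old <op> op_val;
 *	} while (cmpxchg(&v->counter, old, new) != old);
 */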
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

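/*
 * atomic_read() and atomic_set() are plain accesses: an aligned 4-byte
 * load or store is atomic on s390, so only a compiler barrier is needed
 * to keep the access from being cached or reordered by the compiler.
 */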
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

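/*
 * All of the add/sub/inc/dec primitives below are expressed in terms of
 * atomic_add_return() and atomic_sub_return(), which return the new
 * value produced by a single __CS_LOOP ("ar" / "sr").
 */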
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

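/*
 * atomic_clear_mask() clears the bits set in @mask ("nr" with ~mask),
 * atomic_set_mask() sets them ("or" with mask).
 */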
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

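/*
 * atomic_xchg() falls back to the generic xchg() pulled in via
 * <asm/system.h>; atomic_cmpxchg() maps onto a single CS instruction,
 * which returns the value found in the counter (== old on success).
 */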
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

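/*
 * atomic_add_unless() adds @a to @v unless @v already holds @u and
 * returns non-zero iff the addition was performed.  The usual consumer
 * is atomic_inc_not_zero(), e.g. for reference counts that must not be
 * revived once they have dropped to zero (hypothetical caller):
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */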
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

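/*
 * __CSG_LOOP is the 64-bit counterpart of __CS_LOOP: it uses the 64-bit
 * instructions LG/LGR and COMPARE AND SWAP (CSG) together with a 64-bit
 * register-register operation ("agr", "sgr", "ngr" or "ogr").
 */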
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

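/*
 * On 31-bit kernels there is no 64-bit compare-and-swap on a single
 * register, so atomic64_t is emulated: the counter is held in an
 * even/odd register pair (register_pair) and updated with COMPARE
 * DOUBLE AND SWAP (CDS); the remaining operations are built as
 * cmpxchg retry loops on top of that.
 */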
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

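/*
 * The remaining 64-bit operations on 31-bit are simple
 * read/compute/atomic64_cmpxchg() retry loops.
 */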
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

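/*
 * atomic64_add_unless() adds @a to @v unless @v already holds @u and
 * returns non-zero iff the addition was performed.  It is built solely
 * from atomic64_read() and atomic64_cmpxchg(), so it works for both
 * implementations above.
 */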
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

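/*
 * atomic64_dec_if_positive() decrements @v unless the result would be
 * negative and returns the decremented value (which is negative if no
 * decrement took place).
 */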
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

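/*
 * The smp_mb__*() hooks order memory accesses around atomic_inc() and
 * atomic_dec() for callers that need it; here they expand to full
 * memory barriers.
 */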
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__  */