xref: /openbmc/linux/arch/s390/include/asm/atomic.h (revision b34e08d5)
/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }
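
/*
 * Example (sketch, not part of the original header): ATOMIC_INIT is the
 * static initializer for atomic_t. The name "pending" is illustrative
 * only.
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 */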

#define __ATOMIC_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr	14,0\n"
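
/*
 * On z196 and newer machines the interlocked-access facility provides
 * load-and-{add,and,or} instructions (laa/lan/lao) that perform the
 * whole read-modify-write as a single interlocked instruction.
 * "bcr 14,0" is the fast-BCR-serialization form introduced with z196,
 * used as a memory barrier around the fully ordered variants.
 */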

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val;							\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"
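
/*
 * Pre-z196 fallback: emulate the fetch-and-op with a classic
 * compare-and-swap loop. "cs" stores new_val only if the word in
 * memory still equals old_val; otherwise it reloads the current value
 * into old_val and sets the condition code so that "jl 0b" retries.
 * No extra barrier string is needed here because compare-and-swap is
 * itself a serializing instruction.
 */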

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val, new_val;						\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

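/*
 * Aligned 4-byte loads and stores are atomic on s390; the asm wrappers
 * below just keep the compiler from tearing or caching the access.
 */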
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

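/*
 * The value-returning variant is fully ordered, hence the
 * __ATOMIC_BARRIER bracketing: on z196 "laa" alone is not a
 * serialization point. The void variants below use
 * __ATOMIC_NO_BARRIER instead.
 */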
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

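/*
 * For constant increments that fit in a signed byte, z196 builds can
 * use "asi" (add a signed 8-bit immediate directly to the word in
 * storage) and skip the loop entirely; -129 < i < 128 is exactly the
 * immediate range -128..127.
 */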
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

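/*
 * Example (sketch, not part of the original header): a minimal
 * reference count built on the helpers above. The names "obj",
 * "obj_get", "obj_put" and "free_obj" are illustrative only.
 *
 *	struct obj { atomic_t refcnt; };
 *
 *	void obj_get(struct obj *o) { atomic_inc(&o->refcnt); }
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			free_obj(o);
 *	}
 */
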
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

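/*
 * atomic_cmpxchg maps directly onto "cs": if v->counter equals old it
 * is replaced by new, otherwise old is updated with the current value.
 * Either way the previous value is returned, so callers test success
 * with (atomic_cmpxchg(v, old, new) == old).
 */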
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

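/*
 * Canonical cmpxchg loop: re-read and retry on failure until either
 * the add succeeds or the forbidden value u is observed. Returns the
 * old value, so callers compare against u to learn whether the add
 * actually happened.
 */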
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT
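
/*
 * On 64-bit machines atomic64_t uses the native 8-byte instructions
 * (lg/stg/csg, and laag/lang/laog on z196), mirroring the 32-bit
 * macros above. The 31-bit fallback further below emulates 64-bit
 * atomics with "cds" (compare double and swap) on register pairs.
 */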

#define __ATOMIC64_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val;						\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val, new_val;					\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

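/*
 * 31-bit: there are no 64-bit general registers, so atomic64_t is
 * emulated with an even/odd register pair and "cds" (compare double
 * and swap). register_pair, defined in the asm headers, overlays a
 * 64-bit value on such a pair.
 */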
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* clear the mask bits, matching the 64-bit variant */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

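/*
 * Decrement only if the result stays non-negative. Returns the
 * decremented value, or a negative number (the observed value minus
 * one) when the counter was left unmodified.
 */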
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

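/*
 * The void inc/dec variants above use the no-barrier forms and are not
 * serialization points on z196, so these hooks have to expand to a
 * real smp_mb() rather than a plain compiler barrier.
 */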
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */