xref: /openbmc/linux/arch/s390/include/asm/atomic.h (revision 4c5a116a)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

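/*
 * An aligned 32-bit load or store is single-copy atomic on s390,
 * so atomic_read() and atomic_set() compile down to plain L/ST
 * instructions without any interlocked update.
 */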
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

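/*
 * atomic_add_return() returns the new value (old + i) while
 * atomic_fetch_add() returns the old value; both are built on the
 * serialized (full barrier) add primitive from asm/atomic_ops.h.
 */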
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

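/*
 * With the z196 interlocked-access facility, adding a compile-time
 * constant that fits a signed byte (-128..127) can use the
 * interlocked add-immediate forms (likely ASI/AGSI, see
 * __atomic_add_const()), which avoids loading the operand into a
 * register first.
 */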
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

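/*
 * The subtraction ops below are derived by negating the operand;
 * the (int) cast makes the negation yield the intended negative
 * value when callers pass an unsigned argument.
 */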
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

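/*
 * ATOMIC_OPS() above instantiated the bitwise and/or/xor variants.
 * Exchange maps to the generic xchg(); compare-and-exchange uses the
 * compare-and-swap based primitive from asm/atomic_ops.h.
 */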
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

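/*
 * A minimal usage sketch, not part of the upstream header: the
 * classic cmpxchg() retry loop, incrementing a counter only while
 * it is non-zero. The function name is hypothetical and purely
 * illustrative.
 */
static inline bool __atomic_example_inc_not_zero(atomic_t *v)
{
	int old = atomic_read(v);

	while (old) {
		/* Succeeds only if nobody changed *v since we read it. */
		int seen = atomic_cmpxchg(v, old, old + 1);

		if (seen == old)
			return true;
		old = seen;	/* lost the race, retry with the new value */
	}
	return false;
}
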
#define ATOMIC64_INIT(i)  { (i) }

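/*
 * The 64-bit variants mirror the 32-bit ones, using LG/STG and the
 * 64-bit primitives from asm/atomic_ops.h.
 */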
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

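/*
 * v->counter is declared s64 while the __atomic64_* primitives take
 * a long *; the casts below are safe since long is 64 bits wide on
 * s390.
 */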
static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__  */