xref: /openbmc/linux/arch/x86/include/asm/atomic.h (revision ba61bb17)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that the C language cannot guarantee on its own.
 * Useful for resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * since it is a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: value to set @v to
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

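/*
 * Illustrative usage sketch (not part of the upstream header): a
 * hypothetical per-subsystem counter, declared e.g. as
 * "static atomic_t example_count = ATOMIC_INIT(0);".  The example_*
 * names below are invented for illustration only.
 */
static __always_inline void example_counter_reset(atomic_t *counter)
{
	/* Plain atomic store of zero; implies no memory ordering. */
	arch_atomic_set(counter, 0);
}

static __always_inline int example_counter_peek(const atomic_t *counter)
{
	/* Plain atomic load; the value may be stale by the time it is used. */
	return arch_atomic_read(counter);
}
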
/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

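/*
 * Illustrative usage sketch (not part of the upstream header): folding a
 * locally accumulated batch into a shared counter.  Hypothetical helper,
 * for illustration only.
 */
static __always_inline void example_account_batch(atomic_t *total, int batch)
{
	/* A single LOCKed RMW instead of @batch individual increments. */
	arch_atomic_add(batch, total);
}
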
/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}

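/*
 * Illustrative usage sketch (not part of the upstream header): dropping
 * several references at once and detecting when the count reaches zero.
 * Hypothetical helper, for illustration only.
 */
static __always_inline bool example_put_many(atomic_t *refs, int nr)
{
	/* True exactly when this call brought the count down to zero. */
	return arch_atomic_sub_and_test(nr, refs);
}
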
/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

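/*
 * Illustrative usage sketch (not part of the upstream header): tracking
 * how many requests are currently in flight.  Hypothetical helpers, for
 * illustration only.
 */
static __always_inline void example_request_start(atomic_t *in_flight)
{
	arch_atomic_inc(in_flight);	/* one more request outstanding */
}

static __always_inline void example_request_done(atomic_t *in_flight)
{
	arch_atomic_dec(in_flight);	/* request completed */
}
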
/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}

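/*
 * Illustrative usage sketch (not part of the upstream header): the classic
 * "last put releases the object" pattern.  struct example_object and the
 * helper are hypothetical, for illustration only.
 */
struct example_object {
	atomic_t refcount;
};

static __always_inline bool example_object_put(struct example_object *obj)
{
	/* True when the caller held the final reference and must free @obj. */
	return arch_atomic_dec_and_test(&obj->refcount);
}
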
/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}

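/*
 * Illustrative usage sketch (not part of the upstream header): charging
 * against a signed budget and detecting when it becomes overdrawn.
 * Hypothetical helper, for illustration only.
 */
static __always_inline bool example_charge(atomic_t *budget, int amount)
{
	/* True if the remaining budget went negative after the charge. */
	return arch_atomic_add_negative(-amount, budget);
}
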
/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value of @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value of @v.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}

/* Atomically increment/decrement @v and return the new value. */
#define arch_atomic_inc_return(v)  (arch_atomic_add_return(1, v))
#define arch_atomic_dec_return(v)  (arch_atomic_sub_return(1, v))

/* Atomically add @i to @v and return the value @v held before the addition. */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

/* Atomically subtract @i from @v and return the value @v held before the subtraction. */
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}

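/*
 * Illustrative usage sketch (not part of the upstream header): the *_return
 * helpers above hand back the new value, while the fetch_* helpers hand back
 * the old one.  A hypothetical id allocator relies on the latter; the name
 * is invented for illustration only.
 */
static __always_inline int example_alloc_id(atomic_t *next_id)
{
	/* Each caller gets the pre-increment value, hence a unique id. */
	return arch_atomic_fetch_add(1, next_id);
}
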
/*
 * Atomically compare @v with @old and, if they are equal, store @new in @v.
 * Returns the value @v held before the operation.
 */
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

/*
 * Like arch_atomic_cmpxchg(), but returns a success flag and, on failure,
 * updates *@old with the value currently held by @v.
 */
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}

/* Atomically exchange the value of @v with @new and return the old value. */
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

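/*
 * Illustrative usage sketch (not part of the upstream header): a lock-free
 * "record the maximum" update using the same try_cmpxchg() loop shape as
 * the fetch_*() helpers below.  Hypothetical helper, for illustration only.
 */
static __always_inline void example_track_max(atomic_t *max, int new)
{
	int old = arch_atomic_read(max);

	do {
		if (old >= new)
			break;	/* current maximum is already large enough */
	} while (!arch_atomic_try_cmpxchg(max, &old, new));
}
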
/* Atomically perform @v &= @i. */
static inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically perform @v &= @i and return the old value of @v. */
static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}

/* Atomically perform @v |= @i. */
static inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically perform @v |= @i and return the old value of @v. */
static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}

/* Atomically perform @v ^= @i. */
static inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically perform @v ^= @i and return the old value of @v. */
static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}

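/*
 * Illustrative usage sketch (not part of the upstream header): using an
 * atomic_t as a small flag word.  EXAMPLE_FLAG_DIRTY and the helpers are
 * hypothetical, for illustration only.
 */
#define EXAMPLE_FLAG_DIRTY	0x1

static inline void example_mark_dirty(atomic_t *flags)
{
	arch_atomic_or(EXAMPLE_FLAG_DIRTY, flags);	/* set the bit */
}

static inline bool example_test_and_clear_dirty(atomic_t *flags)
{
	/* fetch_and() returns the old word, so the previous bit state is visible. */
	return arch_atomic_fetch_and(~EXAMPLE_FLAG_DIRTY, flags) & EXAMPLE_FLAG_DIRTY;
}
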
/**
 * __arch_atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

	return c;
}

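/*
 * Illustrative usage sketch (not part of the upstream header): the
 * "take a reference unless the object is already dead" idiom built on
 * __arch_atomic_add_unless().  Hypothetical helper, for illustration only.
 */
static __always_inline bool example_get_unless_zero(atomic_t *refs)
{
	/* Fails once the count has already dropped to zero. */
	return __arch_atomic_add_unless(refs, 1, 0) != 0;
}
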
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_X86_ATOMIC_H */