/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }
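
/*
 * Hypothetical usage sketch (illustration only, not part of this API):
 * ATOMIC_INIT() is for static initialization, e.g.
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 * where nr_widgets is a made-up name. Runtime (re)initialization goes
 * through atomic_set() instead.
 */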

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * since it's a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
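
/*
 * Hypothetical usage sketch: callers normally go through the
 * atomic_read()/atomic_set() wrappers generated by
 * <asm-generic/atomic-instrumented.h> rather than the arch_ variants:
 *
 *	atomic_set(&nr_widgets, 16);
 *	pr_info("widgets: %d\n", atomic_read(&nr_widgets));
 *
 * Neither operation implies a memory barrier; they only guarantee a
 * single, untorn access.
 */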

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}
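
/*
 * Hypothetical usage sketch: a lock-free statistics counter (stats and
 * batch are made-up names). LOCK_PREFIX expands to the x86 "lock"
 * prefix on SMP builds (see <asm/alternative.h>), making the
 * read-modify-write atomic across CPUs:
 *
 *	atomic_add(batch, &stats);
 *	atomic_sub(batch, &stats);
 *
 * No value is returned; use the *_return or fetch_* variants when the
 * result is needed.
 */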

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
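
/*
 * Hypothetical usage sketch: dropping nr references at once and freeing
 * on the final one (obj and its refs field are made-up names):
 *
 *	if (atomic_sub_and_test(nr, &obj->refs))
 *		kfree(obj);
 *
 * The test comes from the subl instruction's zero flag, so no separate
 * re-read of the counter is needed.
 */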

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec
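
/*
 * Hypothetical usage sketch: the incl/decl forms avoid an immediate
 * operand when the adjustment is exactly 1 (active_users is a made-up
 * name):
 *
 *	atomic_inc(&active_users);
 *	...
 *	atomic_dec(&active_users);
 *
 * Like arch_atomic_add()/arch_atomic_sub(), these return nothing.
 */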

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
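
/*
 * Hypothetical usage sketch: the canonical reference-count "put" path,
 * where the last decrement triggers teardown (obj, refcount and
 * release() are made-up names):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release(obj);
 *
 * Exactly one CPU can observe the 1 -> 0 transition, so the release
 * path runs once.
 */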

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
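
/*
 * Hypothetical usage sketch: detecting underflow of a signed budget
 * counter via the sign flag of the atomic add (budget, cost and
 * warn_over_budget() are made-up names):
 *
 *	if (atomic_add_negative(-cost, &budget))
 *		warn_over_budget();
 */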

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
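
/*
 * Hypothetical usage sketch: the *_return variants yield the *new*
 * value, here used to hand out increasing sequence numbers (next_seq
 * is a made-up name):
 *
 *	int seq = atomic_add_return(1, &next_seq);
 *
 * arch_atomic_add_return() is built on xadd, which returns the old
 * value; adding @i back produces the post-add result.
 */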

/* Atomically add @i to @v, returning the old value of @v. */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

/* Atomically subtract @i from @v, returning the old value of @v. */
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
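
/*
 * Hypothetical usage sketch: fetch_* variants return the *old* value,
 * e.g. claiming a slot index before incrementing past it (next_slot
 * and use_slot() are made-up names):
 *
 *	int slot = atomic_fetch_add(1, &next_slot);
 *	use_slot(slot);
 *
 * Contrast with atomic_add_return(), which returns the new value.
 */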

/* Atomically set @v to @new if it equals @old, returning the old value. */
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
/*
 * As cmpxchg, but returns success as a bool and, on failure, updates
 * *@old with the current value of @v.
 */
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}
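
/*
 * Hypothetical usage sketch: a compare-and-swap update loop. On
 * failure, try_cmpxchg() refreshes old with the current counter value,
 * so no explicit re-read is needed (transform() is a made-up pure
 * function):
 *
 *	int old = atomic_read(&v), new;
 *	do {
 *		new = transform(old);
 *	} while (!atomic_try_cmpxchg(&v, &old, new));
 */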

/* Atomically exchange @v's value with @new, returning the old value. */
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
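
/*
 * Hypothetical usage sketch: unconditionally swapping in a new state
 * while observing the previous one, e.g. consuming a pending flag
 * (pending and process_pending() are made-up names):
 *
 *	if (atomic_xchg(&pending, 0))
 *		process_pending();
 *
 * Unlike cmpxchg, the store always happens regardless of the old value.
 */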

/* Atomically AND @i into @v; no value is returned. */
static inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically AND @i into @v, returning the old value of @v. */
static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
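
/*
 * The fetch_and/fetch_or/fetch_xor helpers above and below all follow
 * the same pattern: read the counter once, then retry try_cmpxchg()
 * until no other CPU has changed the value in between. A hypothetical
 * expansion of one iteration:
 *
 *	val = arch_atomic_read(v);
 *	// if v->counter still equals val, store (val & i) and stop;
 *	// otherwise val is updated to the current counter and we retry
 */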

/* Atomically OR @i into @v; no value is returned. */
static inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically OR @i into @v, returning the old value of @v. */
static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}

/* Atomically XOR @i into @v; no value is returned. */
static inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/* Atomically XOR @i into @v, returning the old value of @v. */
static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_X86_ATOMIC_H */