xref: /openbmc/linux/arch/x86/include/asm/atomic64_64.h (revision ba61bb17)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}

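/**
 * arch_atomic64_sub_return - subtract and return
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */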
static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

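/**
 * arch_atomic64_fetch_add - add and return old value
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the old value of @v.
 */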
static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

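/**
 * arch_atomic64_fetch_sub - subtract and return old value
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns the old value of @v.
 */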
static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

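/*
 * arch_atomic64_inc_return - increment @v by 1 and return the new value.
 * arch_atomic64_dec_return - decrement @v by 1 and return the new value.
 */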
#define arch_atomic64_inc_return(v)  (arch_atomic64_add_return(1, (v)))
#define arch_atomic64_dec_return(v)  (arch_atomic64_sub_return(1, (v)))

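/**
 * arch_atomic64_cmpxchg - compare and exchange
 * @v: pointer to type atomic64_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if @v equals @old, and returns
 * the old value of @v.
 */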
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

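/**
 * arch_atomic64_try_cmpxchg - compare and exchange, updating the expected value
 * @v: pointer to type atomic64_t
 * @old: pointer to the expected value, updated to the current value on failure
 * @new: new value
 *
 * Atomically sets @v to @new if @v equals *@old and returns true;
 * otherwise stores the current value of @v in *@old and returns false.
 */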
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}

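/**
 * arch_atomic64_xchg - exchange the value
 * @v: pointer to type atomic64_t
 * @new: new value
 *
 * Atomically sets @v to @new and returns the old value of @v.
 */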
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true if the addition was performed, or false if @v was @u.
 */
static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
{
	s64 c = arch_atomic64_read(v);
	do {
		if (unlikely(c == u))
			return false;
	} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
	return true;
}

#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = arch_atomic64_read(v);
	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));
	return dec;
}

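/**
 * arch_atomic64_and - bitwise AND with atomic64 variable
 * @i: integer value to AND with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i.
 */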
static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

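/**
 * arch_atomic64_fetch_and - bitwise AND and return old value
 * @i: integer value to AND with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i and returns the old value of @v.
 */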
static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}

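/**
 * arch_atomic64_or - bitwise OR with atomic64 variable
 * @i: integer value to OR with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i.
 */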
static inline void arch_atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

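/**
 * arch_atomic64_fetch_or - bitwise OR and return old value
 * @i: integer value to OR with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i and returns the old value of @v.
 */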
static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

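/**
 * arch_atomic64_xor - bitwise XOR with atomic64 variable
 * @i: integer value to XOR with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i.
 */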
static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

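/**
 * arch_atomic64_fetch_xor - bitwise XOR and return old value
 * @i: integer value to XOR with @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i and returns the old value of @v.
 */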
static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */