1 // SPDX-License-Identifier: GPL-2.0
2
3 // Generated by scripts/atomic/gen-atomic-fallback.sh
4 // DO NOT MODIFY THIS FILE DIRECTLY
5
6 #ifndef _LINUX_ATOMIC_FALLBACK_H
7 #define _LINUX_ATOMIC_FALLBACK_H
8
9 #include <linux/compiler.h>
10
11 #if defined(arch_xchg)
12 #define raw_xchg arch_xchg
13 #elif defined(arch_xchg_relaxed)
14 #define raw_xchg(...) \
15 __atomic_op_fence(arch_xchg, __VA_ARGS__)
16 #else
17 extern void raw_xchg_not_implemented(void);
18 #define raw_xchg(...) raw_xchg_not_implemented()
19 #endif
20
21 #if defined(arch_xchg_acquire)
22 #define raw_xchg_acquire arch_xchg_acquire
23 #elif defined(arch_xchg_relaxed)
24 #define raw_xchg_acquire(...) \
25 __atomic_op_acquire(arch_xchg, __VA_ARGS__)
26 #elif defined(arch_xchg)
27 #define raw_xchg_acquire arch_xchg
28 #else
29 extern void raw_xchg_acquire_not_implemented(void);
30 #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
31 #endif
32
33 #if defined(arch_xchg_release)
34 #define raw_xchg_release arch_xchg_release
35 #elif defined(arch_xchg_relaxed)
36 #define raw_xchg_release(...) \
37 __atomic_op_release(arch_xchg, __VA_ARGS__)
38 #elif defined(arch_xchg)
39 #define raw_xchg_release arch_xchg
40 #else
41 extern void raw_xchg_release_not_implemented(void);
42 #define raw_xchg_release(...) raw_xchg_release_not_implemented()
43 #endif
44
45 #if defined(arch_xchg_relaxed)
46 #define raw_xchg_relaxed arch_xchg_relaxed
47 #elif defined(arch_xchg)
48 #define raw_xchg_relaxed arch_xchg
49 #else
50 extern void raw_xchg_relaxed_not_implemented(void);
51 #define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
52 #endif
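/*
 * Illustrative note: every ordering variant above follows the same cascade.
 * Prefer the arch-provided op, otherwise build the variant from the relaxed
 * op plus a fence wrapper, otherwise reuse the fully ordered op, and as a
 * last resort expand to a call to an extern function that is never defined,
 * so a misconfigured arch fails at link time. For example, on an arch that
 * only provides arch_xchg_relaxed(), a hypothetical caller's
 *
 *	old = raw_xchg_acquire(&var, new);
 *
 * becomes __atomic_op_acquire(arch_xchg, &var, new), i.e. (per the
 * __atomic_op_acquire() wrapper in <linux/atomic.h>) the relaxed exchange
 * followed by an acquire fence.
 */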
53
54 #if defined(arch_cmpxchg)
55 #define raw_cmpxchg arch_cmpxchg
56 #elif defined(arch_cmpxchg_relaxed)
57 #define raw_cmpxchg(...) \
58 __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
59 #else
60 extern void raw_cmpxchg_not_implemented(void);
61 #define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
62 #endif
63
64 #if defined(arch_cmpxchg_acquire)
65 #define raw_cmpxchg_acquire arch_cmpxchg_acquire
66 #elif defined(arch_cmpxchg_relaxed)
67 #define raw_cmpxchg_acquire(...) \
68 __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
69 #elif defined(arch_cmpxchg)
70 #define raw_cmpxchg_acquire arch_cmpxchg
71 #else
72 extern void raw_cmpxchg_acquire_not_implemented(void);
73 #define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
74 #endif
75
76 #if defined(arch_cmpxchg_release)
77 #define raw_cmpxchg_release arch_cmpxchg_release
78 #elif defined(arch_cmpxchg_relaxed)
79 #define raw_cmpxchg_release(...) \
80 __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
81 #elif defined(arch_cmpxchg)
82 #define raw_cmpxchg_release arch_cmpxchg
83 #else
84 extern void raw_cmpxchg_release_not_implemented(void);
85 #define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
86 #endif
87
88 #if defined(arch_cmpxchg_relaxed)
89 #define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
90 #elif defined(arch_cmpxchg)
91 #define raw_cmpxchg_relaxed arch_cmpxchg
92 #else
93 extern void raw_cmpxchg_relaxed_not_implemented(void);
94 #define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
95 #endif
96
97 #if defined(arch_cmpxchg64)
98 #define raw_cmpxchg64 arch_cmpxchg64
99 #elif defined(arch_cmpxchg64_relaxed)
100 #define raw_cmpxchg64(...) \
101 __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
102 #else
103 extern void raw_cmpxchg64_not_implemented(void);
104 #define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
105 #endif
106
107 #if defined(arch_cmpxchg64_acquire)
108 #define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
109 #elif defined(arch_cmpxchg64_relaxed)
110 #define raw_cmpxchg64_acquire(...) \
111 __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
112 #elif defined(arch_cmpxchg64)
113 #define raw_cmpxchg64_acquire arch_cmpxchg64
114 #else
115 extern void raw_cmpxchg64_acquire_not_implemented(void);
116 #define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
117 #endif
118
119 #if defined(arch_cmpxchg64_release)
120 #define raw_cmpxchg64_release arch_cmpxchg64_release
121 #elif defined(arch_cmpxchg64_relaxed)
122 #define raw_cmpxchg64_release(...) \
123 __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
124 #elif defined(arch_cmpxchg64)
125 #define raw_cmpxchg64_release arch_cmpxchg64
126 #else
127 extern void raw_cmpxchg64_release_not_implemented(void);
128 #define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
129 #endif
130
131 #if defined(arch_cmpxchg64_relaxed)
132 #define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
133 #elif defined(arch_cmpxchg64)
134 #define raw_cmpxchg64_relaxed arch_cmpxchg64
135 #else
136 extern void raw_cmpxchg64_relaxed_not_implemented(void);
137 #define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
138 #endif
139
140 #if defined(arch_cmpxchg128)
141 #define raw_cmpxchg128 arch_cmpxchg128
142 #elif defined(arch_cmpxchg128_relaxed)
143 #define raw_cmpxchg128(...) \
144 __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
145 #else
146 extern void raw_cmpxchg128_not_implemented(void);
147 #define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
148 #endif
149
150 #if defined(arch_cmpxchg128_acquire)
151 #define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
152 #elif defined(arch_cmpxchg128_relaxed)
153 #define raw_cmpxchg128_acquire(...) \
154 __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
155 #elif defined(arch_cmpxchg128)
156 #define raw_cmpxchg128_acquire arch_cmpxchg128
157 #else
158 extern void raw_cmpxchg128_acquire_not_implemented(void);
159 #define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
160 #endif
161
162 #if defined(arch_cmpxchg128_release)
163 #define raw_cmpxchg128_release arch_cmpxchg128_release
164 #elif defined(arch_cmpxchg128_relaxed)
165 #define raw_cmpxchg128_release(...) \
166 __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
167 #elif defined(arch_cmpxchg128)
168 #define raw_cmpxchg128_release arch_cmpxchg128
169 #else
170 extern void raw_cmpxchg128_release_not_implemented(void);
171 #define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
172 #endif
173
174 #if defined(arch_cmpxchg128_relaxed)
175 #define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
176 #elif defined(arch_cmpxchg128)
177 #define raw_cmpxchg128_relaxed arch_cmpxchg128
178 #else
179 extern void raw_cmpxchg128_relaxed_not_implemented(void);
180 #define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
181 #endif
182
183 #if defined(arch_try_cmpxchg)
184 #define raw_try_cmpxchg arch_try_cmpxchg
185 #elif defined(arch_try_cmpxchg_relaxed)
186 #define raw_try_cmpxchg(...) \
187 __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
188 #else
189 #define raw_try_cmpxchg(_ptr, _oldp, _new) \
190 ({ \
191 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
192 ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
193 if (unlikely(___r != ___o)) \
194 *___op = ___r; \
195 likely(___r == ___o); \
196 })
197 #endif
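/*
 * Usage sketch (hypothetical caller, shown for illustration only): the
 * fallback above writes the observed value back through @_oldp on failure,
 * which is what lets try_cmpxchg() loops avoid re-reading the variable:
 *
 *	unsigned long old = *ptr;
 *
 *	do {
 *		new = old | BIT(0);
 *	} while (!raw_try_cmpxchg(ptr, &old, new));
 */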
198
199 #if defined(arch_try_cmpxchg_acquire)
200 #define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
201 #elif defined(arch_try_cmpxchg_relaxed)
202 #define raw_try_cmpxchg_acquire(...) \
203 __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
204 #elif defined(arch_try_cmpxchg)
205 #define raw_try_cmpxchg_acquire arch_try_cmpxchg
206 #else
207 #define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
208 ({ \
209 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
210 ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
211 if (unlikely(___r != ___o)) \
212 *___op = ___r; \
213 likely(___r == ___o); \
214 })
215 #endif
216
217 #if defined(arch_try_cmpxchg_release)
218 #define raw_try_cmpxchg_release arch_try_cmpxchg_release
219 #elif defined(arch_try_cmpxchg_relaxed)
220 #define raw_try_cmpxchg_release(...) \
221 __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
222 #elif defined(arch_try_cmpxchg)
223 #define raw_try_cmpxchg_release arch_try_cmpxchg
224 #else
225 #define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
226 ({ \
227 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
228 ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
229 if (unlikely(___r != ___o)) \
230 *___op = ___r; \
231 likely(___r == ___o); \
232 })
233 #endif
234
235 #if defined(arch_try_cmpxchg_relaxed)
236 #define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
237 #elif defined(arch_try_cmpxchg)
238 #define raw_try_cmpxchg_relaxed arch_try_cmpxchg
239 #else
240 #define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
241 ({ \
242 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
243 ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
244 if (unlikely(___r != ___o)) \
245 *___op = ___r; \
246 likely(___r == ___o); \
247 })
248 #endif
249
250 #if defined(arch_try_cmpxchg64)
251 #define raw_try_cmpxchg64 arch_try_cmpxchg64
252 #elif defined(arch_try_cmpxchg64_relaxed)
253 #define raw_try_cmpxchg64(...) \
254 __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
255 #else
256 #define raw_try_cmpxchg64(_ptr, _oldp, _new) \
257 ({ \
258 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
259 ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
260 if (unlikely(___r != ___o)) \
261 *___op = ___r; \
262 likely(___r == ___o); \
263 })
264 #endif
265
266 #if defined(arch_try_cmpxchg64_acquire)
267 #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
268 #elif defined(arch_try_cmpxchg64_relaxed)
269 #define raw_try_cmpxchg64_acquire(...) \
270 __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
271 #elif defined(arch_try_cmpxchg64)
272 #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
273 #else
274 #define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
275 ({ \
276 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
277 ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
278 if (unlikely(___r != ___o)) \
279 *___op = ___r; \
280 likely(___r == ___o); \
281 })
282 #endif
283
284 #if defined(arch_try_cmpxchg64_release)
285 #define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
286 #elif defined(arch_try_cmpxchg64_relaxed)
287 #define raw_try_cmpxchg64_release(...) \
288 __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
289 #elif defined(arch_try_cmpxchg64)
290 #define raw_try_cmpxchg64_release arch_try_cmpxchg64
291 #else
292 #define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
293 ({ \
294 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
295 ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
296 if (unlikely(___r != ___o)) \
297 *___op = ___r; \
298 likely(___r == ___o); \
299 })
300 #endif
301
302 #if defined(arch_try_cmpxchg64_relaxed)
303 #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
304 #elif defined(arch_try_cmpxchg64)
305 #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
306 #else
307 #define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
308 ({ \
309 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
310 ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
311 if (unlikely(___r != ___o)) \
312 *___op = ___r; \
313 likely(___r == ___o); \
314 })
315 #endif
316
317 #if defined(arch_try_cmpxchg128)
318 #define raw_try_cmpxchg128 arch_try_cmpxchg128
319 #elif defined(arch_try_cmpxchg128_relaxed)
320 #define raw_try_cmpxchg128(...) \
321 __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
322 #else
323 #define raw_try_cmpxchg128(_ptr, _oldp, _new) \
324 ({ \
325 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
326 ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
327 if (unlikely(___r != ___o)) \
328 *___op = ___r; \
329 likely(___r == ___o); \
330 })
331 #endif
332
333 #if defined(arch_try_cmpxchg128_acquire)
334 #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
335 #elif defined(arch_try_cmpxchg128_relaxed)
336 #define raw_try_cmpxchg128_acquire(...) \
337 __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
338 #elif defined(arch_try_cmpxchg128)
339 #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
340 #else
341 #define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
342 ({ \
343 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
344 ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
345 if (unlikely(___r != ___o)) \
346 *___op = ___r; \
347 likely(___r == ___o); \
348 })
349 #endif
350
351 #if defined(arch_try_cmpxchg128_release)
352 #define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
353 #elif defined(arch_try_cmpxchg128_relaxed)
354 #define raw_try_cmpxchg128_release(...) \
355 __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
356 #elif defined(arch_try_cmpxchg128)
357 #define raw_try_cmpxchg128_release arch_try_cmpxchg128
358 #else
359 #define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
360 ({ \
361 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
362 ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
363 if (unlikely(___r != ___o)) \
364 *___op = ___r; \
365 likely(___r == ___o); \
366 })
367 #endif
368
369 #if defined(arch_try_cmpxchg128_relaxed)
370 #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
371 #elif defined(arch_try_cmpxchg128)
372 #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
373 #else
374 #define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
375 ({ \
376 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
377 ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
378 if (unlikely(___r != ___o)) \
379 *___op = ___r; \
380 likely(___r == ___o); \
381 })
382 #endif
383
384 #define raw_cmpxchg_local arch_cmpxchg_local
385
386 #ifdef arch_try_cmpxchg_local
387 #define raw_try_cmpxchg_local arch_try_cmpxchg_local
388 #else
389 #define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
390 ({ \
391 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
392 ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
393 if (unlikely(___r != ___o)) \
394 *___op = ___r; \
395 likely(___r == ___o); \
396 })
397 #endif
398
399 #define raw_cmpxchg64_local arch_cmpxchg64_local
400
401 #ifdef arch_try_cmpxchg64_local
402 #define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
403 #else
404 #define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
405 ({ \
406 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
407 ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
408 if (unlikely(___r != ___o)) \
409 *___op = ___r; \
410 likely(___r == ___o); \
411 })
412 #endif
413
414 #define raw_cmpxchg128_local arch_cmpxchg128_local
415
416 #ifdef arch_try_cmpxchg128_local
417 #define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
418 #else
419 #define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
420 ({ \
421 typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
422 ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
423 if (unlikely(___r != ___o)) \
424 *___op = ___r; \
425 likely(___r == ___o); \
426 })
427 #endif
428
429 #define raw_sync_cmpxchg arch_sync_cmpxchg
430
431 /**
432 * raw_atomic_read() - atomic load with relaxed ordering
433 * @v: pointer to atomic_t
434 *
435 * Atomically loads the value of @v with relaxed ordering.
436 *
437 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
438 *
439 * Return: The value loaded from @v.
440 */
441 static __always_inline int
442 raw_atomic_read(const atomic_t *v)
443 {
444 return arch_atomic_read(v);
445 }
446
447 /**
448 * raw_atomic_read_acquire() - atomic load with acquire ordering
449 * @v: pointer to atomic_t
450 *
451 * Atomically loads the value of @v with acquire ordering.
452 *
453 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
454 *
455 * Return: The value loaded from @v.
456 */
457 static __always_inline int
458 raw_atomic_read_acquire(const atomic_t *v)
459 {
460 #if defined(arch_atomic_read_acquire)
461 return arch_atomic_read_acquire(v);
462 #else
463 int ret;
464
465 if (__native_word(atomic_t)) {
466 ret = smp_load_acquire(&(v)->counter);
467 } else {
468 ret = raw_atomic_read(v);
469 __atomic_acquire_fence();
470 }
471
472 return ret;
473 #endif
474 }
475
476 /**
477 * raw_atomic_set() - atomic set with relaxed ordering
478 * @v: pointer to atomic_t
479 * @i: int value to assign
480 *
481 * Atomically sets @v to @i with relaxed ordering.
482 *
483 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
484 *
485 * Return: Nothing.
486 */
487 static __always_inline void
488 raw_atomic_set(atomic_t *v, int i)
489 {
490 arch_atomic_set(v, i);
491 }
492
493 /**
494 * raw_atomic_set_release() - atomic set with release ordering
495 * @v: pointer to atomic_t
496 * @i: int value to assign
497 *
498 * Atomically sets @v to @i with release ordering.
499 *
500 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
501 *
502 * Return: Nothing.
503 */
504 static __always_inline void
505 raw_atomic_set_release(atomic_t *v, int i)
506 {
507 #if defined(arch_atomic_set_release)
508 arch_atomic_set_release(v, i);
509 #else
510 if (__native_word(atomic_t)) {
511 smp_store_release(&(v)->counter, i);
512 } else {
513 __atomic_release_fence();
514 raw_atomic_set(v, i);
515 }
516 #endif
517 }
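/*
 * Illustrative pairing (hypothetical code, not generated): set_release()
 * publishes data to a reader that uses read_acquire(), so stores made
 * before the release are visible once the acquire observes the flag:
 *
 *	// writer
 *	data = 1;
 *	raw_atomic_set_release(&ready, 1);
 *
 *	// reader
 *	if (raw_atomic_read_acquire(&ready))
 *		use(data);
 */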
518
519 /**
520 * raw_atomic_add() - atomic add with relaxed ordering
521 * @i: int value to add
522 * @v: pointer to atomic_t
523 *
524 * Atomically updates @v to (@v + @i) with relaxed ordering.
525 *
526 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
527 *
528 * Return: Nothing.
529 */
530 static __always_inline void
531 raw_atomic_add(int i, atomic_t *v)
532 {
533 arch_atomic_add(i, v);
534 }
535
536 /**
537 * raw_atomic_add_return() - atomic add with full ordering
538 * @i: int value to add
539 * @v: pointer to atomic_t
540 *
541 * Atomically updates @v to (@v + @i) with full ordering.
542 *
543 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
544 *
545 * Return: The updated value of @v.
546 */
547 static __always_inline int
548 raw_atomic_add_return(int i, atomic_t *v)
549 {
550 #if defined(arch_atomic_add_return)
551 return arch_atomic_add_return(i, v);
552 #elif defined(arch_atomic_add_return_relaxed)
553 int ret;
554 __atomic_pre_full_fence();
555 ret = arch_atomic_add_return_relaxed(i, v);
556 __atomic_post_full_fence();
557 return ret;
558 #else
559 #error "Unable to define raw_atomic_add_return"
560 #endif
561 }
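/*
 * Note on the fully ordered fallback above: when only the relaxed op exists,
 * full ordering is provided by bracketing it with __atomic_pre_full_fence()
 * and __atomic_post_full_fence(). Either way, a hypothetical caller (the
 * names below are illustrative only) gets the updated value back:
 *
 *	if (raw_atomic_add_return(nr_pages, &usage) > limit)
 *		raw_atomic_sub(nr_pages, &usage);
 */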
562
563 /**
564 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
565 * @i: int value to add
566 * @v: pointer to atomic_t
567 *
568 * Atomically updates @v to (@v + @i) with acquire ordering.
569 *
570 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
571 *
572 * Return: The updated value of @v.
573 */
574 static __always_inline int
575 raw_atomic_add_return_acquire(int i, atomic_t *v)
576 {
577 #if defined(arch_atomic_add_return_acquire)
578 return arch_atomic_add_return_acquire(i, v);
579 #elif defined(arch_atomic_add_return_relaxed)
580 int ret = arch_atomic_add_return_relaxed(i, v);
581 __atomic_acquire_fence();
582 return ret;
583 #elif defined(arch_atomic_add_return)
584 return arch_atomic_add_return(i, v);
585 #else
586 #error "Unable to define raw_atomic_add_return_acquire"
587 #endif
588 }
589
590 /**
591 * raw_atomic_add_return_release() - atomic add with release ordering
592 * @i: int value to add
593 * @v: pointer to atomic_t
594 *
595 * Atomically updates @v to (@v + @i) with release ordering.
596 *
597 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
598 *
599 * Return: The updated value of @v.
600 */
601 static __always_inline int
602 raw_atomic_add_return_release(int i, atomic_t *v)
603 {
604 #if defined(arch_atomic_add_return_release)
605 return arch_atomic_add_return_release(i, v);
606 #elif defined(arch_atomic_add_return_relaxed)
607 __atomic_release_fence();
608 return arch_atomic_add_return_relaxed(i, v);
609 #elif defined(arch_atomic_add_return)
610 return arch_atomic_add_return(i, v);
611 #else
612 #error "Unable to define raw_atomic_add_return_release"
613 #endif
614 }
615
616 /**
617 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
618 * @i: int value to add
619 * @v: pointer to atomic_t
620 *
621 * Atomically updates @v to (@v + @i) with relaxed ordering.
622 *
623 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
624 *
625 * Return: The updated value of @v.
626 */
627 static __always_inline int
628 raw_atomic_add_return_relaxed(int i, atomic_t *v)
629 {
630 #if defined(arch_atomic_add_return_relaxed)
631 return arch_atomic_add_return_relaxed(i, v);
632 #elif defined(arch_atomic_add_return)
633 return arch_atomic_add_return(i, v);
634 #else
635 #error "Unable to define raw_atomic_add_return_relaxed"
636 #endif
637 }
638
639 /**
640 * raw_atomic_fetch_add() - atomic add with full ordering
641 * @i: int value to add
642 * @v: pointer to atomic_t
643 *
644 * Atomically updates @v to (@v + @i) with full ordering.
645 *
646 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
647 *
648 * Return: The original value of @v.
649 */
650 static __always_inline int
651 raw_atomic_fetch_add(int i, atomic_t *v)
652 {
653 #if defined(arch_atomic_fetch_add)
654 return arch_atomic_fetch_add(i, v);
655 #elif defined(arch_atomic_fetch_add_relaxed)
656 int ret;
657 __atomic_pre_full_fence();
658 ret = arch_atomic_fetch_add_relaxed(i, v);
659 __atomic_post_full_fence();
660 return ret;
661 #else
662 #error "Unable to define raw_atomic_fetch_add"
663 #endif
664 }
665
666 /**
667 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
668 * @i: int value to add
669 * @v: pointer to atomic_t
670 *
671 * Atomically updates @v to (@v + @i) with acquire ordering.
672 *
673 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
674 *
675 * Return: The original value of @v.
676 */
677 static __always_inline int
678 raw_atomic_fetch_add_acquire(int i, atomic_t *v)
679 {
680 #if defined(arch_atomic_fetch_add_acquire)
681 return arch_atomic_fetch_add_acquire(i, v);
682 #elif defined(arch_atomic_fetch_add_relaxed)
683 int ret = arch_atomic_fetch_add_relaxed(i, v);
684 __atomic_acquire_fence();
685 return ret;
686 #elif defined(arch_atomic_fetch_add)
687 return arch_atomic_fetch_add(i, v);
688 #else
689 #error "Unable to define raw_atomic_fetch_add_acquire"
690 #endif
691 }
692
693 /**
694 * raw_atomic_fetch_add_release() - atomic add with release ordering
695 * @i: int value to add
696 * @v: pointer to atomic_t
697 *
698 * Atomically updates @v to (@v + @i) with release ordering.
699 *
700 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
701 *
702 * Return: The original value of @v.
703 */
704 static __always_inline int
705 raw_atomic_fetch_add_release(int i, atomic_t *v)
706 {
707 #if defined(arch_atomic_fetch_add_release)
708 return arch_atomic_fetch_add_release(i, v);
709 #elif defined(arch_atomic_fetch_add_relaxed)
710 __atomic_release_fence();
711 return arch_atomic_fetch_add_relaxed(i, v);
712 #elif defined(arch_atomic_fetch_add)
713 return arch_atomic_fetch_add(i, v);
714 #else
715 #error "Unable to define raw_atomic_fetch_add_release"
716 #endif
717 }
718
719 /**
720 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
721 * @i: int value to add
722 * @v: pointer to atomic_t
723 *
724 * Atomically updates @v to (@v + @i) with relaxed ordering.
725 *
726 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
727 *
728 * Return: The original value of @v.
729 */
730 static __always_inline int
731 raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
732 {
733 #if defined(arch_atomic_fetch_add_relaxed)
734 return arch_atomic_fetch_add_relaxed(i, v);
735 #elif defined(arch_atomic_fetch_add)
736 return arch_atomic_fetch_add(i, v);
737 #else
738 #error "Unable to define raw_atomic_fetch_add_relaxed"
739 #endif
740 }
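/*
 * Reminder (illustrative): the fetch_add() family returns the value @v held
 * before the addition, while add_return() returns the value afterwards. For
 * a hypothetical counter:
 *
 *	old = raw_atomic_fetch_add(1, &v);	// value before the increment
 *	new = raw_atomic_add_return(1, &v);	// value after the increment
 */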
741
742 /**
743 * raw_atomic_sub() - atomic subtract with relaxed ordering
744 * @i: int value to subtract
745 * @v: pointer to atomic_t
746 *
747 * Atomically updates @v to (@v - @i) with relaxed ordering.
748 *
749 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
750 *
751 * Return: Nothing.
752 */
753 static __always_inline void
754 raw_atomic_sub(int i, atomic_t *v)
755 {
756 arch_atomic_sub(i, v);
757 }
758
759 /**
760 * raw_atomic_sub_return() - atomic subtract with full ordering
761 * @i: int value to subtract
762 * @v: pointer to atomic_t
763 *
764 * Atomically updates @v to (@v - @i) with full ordering.
765 *
766 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
767 *
768 * Return: The updated value of @v.
769 */
770 static __always_inline int
771 raw_atomic_sub_return(int i, atomic_t *v)
772 {
773 #if defined(arch_atomic_sub_return)
774 return arch_atomic_sub_return(i, v);
775 #elif defined(arch_atomic_sub_return_relaxed)
776 int ret;
777 __atomic_pre_full_fence();
778 ret = arch_atomic_sub_return_relaxed(i, v);
779 __atomic_post_full_fence();
780 return ret;
781 #else
782 #error "Unable to define raw_atomic_sub_return"
783 #endif
784 }
785
786 /**
787 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
788 * @i: int value to subtract
789 * @v: pointer to atomic_t
790 *
791 * Atomically updates @v to (@v - @i) with acquire ordering.
792 *
793 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
794 *
795 * Return: The updated value of @v.
796 */
797 static __always_inline int
798 raw_atomic_sub_return_acquire(int i, atomic_t *v)
799 {
800 #if defined(arch_atomic_sub_return_acquire)
801 return arch_atomic_sub_return_acquire(i, v);
802 #elif defined(arch_atomic_sub_return_relaxed)
803 int ret = arch_atomic_sub_return_relaxed(i, v);
804 __atomic_acquire_fence();
805 return ret;
806 #elif defined(arch_atomic_sub_return)
807 return arch_atomic_sub_return(i, v);
808 #else
809 #error "Unable to define raw_atomic_sub_return_acquire"
810 #endif
811 }
812
813 /**
814 * raw_atomic_sub_return_release() - atomic subtract with release ordering
815 * @i: int value to subtract
816 * @v: pointer to atomic_t
817 *
818 * Atomically updates @v to (@v - @i) with release ordering.
819 *
820 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
821 *
822 * Return: The updated value of @v.
823 */
824 static __always_inline int
825 raw_atomic_sub_return_release(int i, atomic_t *v)
826 {
827 #if defined(arch_atomic_sub_return_release)
828 return arch_atomic_sub_return_release(i, v);
829 #elif defined(arch_atomic_sub_return_relaxed)
830 __atomic_release_fence();
831 return arch_atomic_sub_return_relaxed(i, v);
832 #elif defined(arch_atomic_sub_return)
833 return arch_atomic_sub_return(i, v);
834 #else
835 #error "Unable to define raw_atomic_sub_return_release"
836 #endif
837 }
838
839 /**
840 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
841 * @i: int value to subtract
842 * @v: pointer to atomic_t
843 *
844 * Atomically updates @v to (@v - @i) with relaxed ordering.
845 *
846 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
847 *
848 * Return: The updated value of @v.
849 */
850 static __always_inline int
851 raw_atomic_sub_return_relaxed(int i, atomic_t *v)
852 {
853 #if defined(arch_atomic_sub_return_relaxed)
854 return arch_atomic_sub_return_relaxed(i, v);
855 #elif defined(arch_atomic_sub_return)
856 return arch_atomic_sub_return(i, v);
857 #else
858 #error "Unable to define raw_atomic_sub_return_relaxed"
859 #endif
860 }
861
862 /**
863 * raw_atomic_fetch_sub() - atomic subtract with full ordering
864 * @i: int value to subtract
865 * @v: pointer to atomic_t
866 *
867 * Atomically updates @v to (@v - @i) with full ordering.
868 *
869 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
870 *
871 * Return: The original value of @v.
872 */
873 static __always_inline int
874 raw_atomic_fetch_sub(int i, atomic_t *v)
875 {
876 #if defined(arch_atomic_fetch_sub)
877 return arch_atomic_fetch_sub(i, v);
878 #elif defined(arch_atomic_fetch_sub_relaxed)
879 int ret;
880 __atomic_pre_full_fence();
881 ret = arch_atomic_fetch_sub_relaxed(i, v);
882 __atomic_post_full_fence();
883 return ret;
884 #else
885 #error "Unable to define raw_atomic_fetch_sub"
886 #endif
887 }
888
889 /**
890 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
891 * @i: int value to subtract
892 * @v: pointer to atomic_t
893 *
894 * Atomically updates @v to (@v - @i) with acquire ordering.
895 *
896 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
897 *
898 * Return: The original value of @v.
899 */
900 static __always_inline int
901 raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
902 {
903 #if defined(arch_atomic_fetch_sub_acquire)
904 return arch_atomic_fetch_sub_acquire(i, v);
905 #elif defined(arch_atomic_fetch_sub_relaxed)
906 int ret = arch_atomic_fetch_sub_relaxed(i, v);
907 __atomic_acquire_fence();
908 return ret;
909 #elif defined(arch_atomic_fetch_sub)
910 return arch_atomic_fetch_sub(i, v);
911 #else
912 #error "Unable to define raw_atomic_fetch_sub_acquire"
913 #endif
914 }
915
916 /**
917 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
918 * @i: int value to subtract
919 * @v: pointer to atomic_t
920 *
921 * Atomically updates @v to (@v - @i) with release ordering.
922 *
923 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
924 *
925 * Return: The original value of @v.
926 */
927 static __always_inline int
928 raw_atomic_fetch_sub_release(int i, atomic_t *v)
929 {
930 #if defined(arch_atomic_fetch_sub_release)
931 return arch_atomic_fetch_sub_release(i, v);
932 #elif defined(arch_atomic_fetch_sub_relaxed)
933 __atomic_release_fence();
934 return arch_atomic_fetch_sub_relaxed(i, v);
935 #elif defined(arch_atomic_fetch_sub)
936 return arch_atomic_fetch_sub(i, v);
937 #else
938 #error "Unable to define raw_atomic_fetch_sub_release"
939 #endif
940 }
941
942 /**
943 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
944 * @i: int value to subtract
945 * @v: pointer to atomic_t
946 *
947 * Atomically updates @v to (@v - @i) with relaxed ordering.
948 *
949 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
950 *
951 * Return: The original value of @v.
952 */
953 static __always_inline int
954 raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
955 {
956 #if defined(arch_atomic_fetch_sub_relaxed)
957 return arch_atomic_fetch_sub_relaxed(i, v);
958 #elif defined(arch_atomic_fetch_sub)
959 return arch_atomic_fetch_sub(i, v);
960 #else
961 #error "Unable to define raw_atomic_fetch_sub_relaxed"
962 #endif
963 }
964
965 /**
966 * raw_atomic_inc() - atomic increment with relaxed ordering
967 * @v: pointer to atomic_t
968 *
969 * Atomically updates @v to (@v + 1) with relaxed ordering.
970 *
971 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
972 *
973 * Return: Nothing.
974 */
975 static __always_inline void
976 raw_atomic_inc(atomic_t *v)
977 {
978 #if defined(arch_atomic_inc)
979 arch_atomic_inc(v);
980 #else
981 raw_atomic_add(1, v);
982 #endif
983 }
984
985 /**
986 * raw_atomic_inc_return() - atomic increment with full ordering
987 * @v: pointer to atomic_t
988 *
989 * Atomically updates @v to (@v + 1) with full ordering.
990 *
991 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
992 *
993 * Return: The updated value of @v.
994 */
995 static __always_inline int
996 raw_atomic_inc_return(atomic_t *v)
997 {
998 #if defined(arch_atomic_inc_return)
999 return arch_atomic_inc_return(v);
1000 #elif defined(arch_atomic_inc_return_relaxed)
1001 int ret;
1002 __atomic_pre_full_fence();
1003 ret = arch_atomic_inc_return_relaxed(v);
1004 __atomic_post_full_fence();
1005 return ret;
1006 #else
1007 return raw_atomic_add_return(1, v);
1008 #endif
1009 }
1010
1011 /**
1012 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
1013 * @v: pointer to atomic_t
1014 *
1015 * Atomically updates @v to (@v + 1) with acquire ordering.
1016 *
1017 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
1018 *
1019 * Return: The updated value of @v.
1020 */
1021 static __always_inline int
1022 raw_atomic_inc_return_acquire(atomic_t *v)
1023 {
1024 #if defined(arch_atomic_inc_return_acquire)
1025 return arch_atomic_inc_return_acquire(v);
1026 #elif defined(arch_atomic_inc_return_relaxed)
1027 int ret = arch_atomic_inc_return_relaxed(v);
1028 __atomic_acquire_fence();
1029 return ret;
1030 #elif defined(arch_atomic_inc_return)
1031 return arch_atomic_inc_return(v);
1032 #else
1033 return raw_atomic_add_return_acquire(1, v);
1034 #endif
1035 }
1036
1037 /**
1038 * raw_atomic_inc_return_release() - atomic increment with release ordering
1039 * @v: pointer to atomic_t
1040 *
1041 * Atomically updates @v to (@v + 1) with release ordering.
1042 *
1043 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
1044 *
1045 * Return: The updated value of @v.
1046 */
1047 static __always_inline int
1048 raw_atomic_inc_return_release(atomic_t *v)
1049 {
1050 #if defined(arch_atomic_inc_return_release)
1051 return arch_atomic_inc_return_release(v);
1052 #elif defined(arch_atomic_inc_return_relaxed)
1053 __atomic_release_fence();
1054 return arch_atomic_inc_return_relaxed(v);
1055 #elif defined(arch_atomic_inc_return)
1056 return arch_atomic_inc_return(v);
1057 #else
1058 return raw_atomic_add_return_release(1, v);
1059 #endif
1060 }
1061
1062 /**
1063 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
1064 * @v: pointer to atomic_t
1065 *
1066 * Atomically updates @v to (@v + 1) with relaxed ordering.
1067 *
1068 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
1069 *
1070 * Return: The updated value of @v.
1071 */
1072 static __always_inline int
1073 raw_atomic_inc_return_relaxed(atomic_t *v)
1074 {
1075 #if defined(arch_atomic_inc_return_relaxed)
1076 return arch_atomic_inc_return_relaxed(v);
1077 #elif defined(arch_atomic_inc_return)
1078 return arch_atomic_inc_return(v);
1079 #else
1080 return raw_atomic_add_return_relaxed(1, v);
1081 #endif
1082 }
1083
1084 /**
1085 * raw_atomic_fetch_inc() - atomic increment with full ordering
1086 * @v: pointer to atomic_t
1087 *
1088 * Atomically updates @v to (@v + 1) with full ordering.
1089 *
1090 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
1091 *
1092 * Return: The original value of @v.
1093 */
1094 static __always_inline int
1095 raw_atomic_fetch_inc(atomic_t *v)
1096 {
1097 #if defined(arch_atomic_fetch_inc)
1098 return arch_atomic_fetch_inc(v);
1099 #elif defined(arch_atomic_fetch_inc_relaxed)
1100 int ret;
1101 __atomic_pre_full_fence();
1102 ret = arch_atomic_fetch_inc_relaxed(v);
1103 __atomic_post_full_fence();
1104 return ret;
1105 #else
1106 return raw_atomic_fetch_add(1, v);
1107 #endif
1108 }
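/*
 * Usage sketch (hypothetical): because fetch_inc() returns the pre-increment
 * value, it can hand out unique, monotonically increasing IDs:
 *
 *	id = raw_atomic_fetch_inc(&next_id);
 */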
1109
1110 /**
1111 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
1112 * @v: pointer to atomic_t
1113 *
1114 * Atomically updates @v to (@v + 1) with acquire ordering.
1115 *
1116 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
1117 *
1118 * Return: The original value of @v.
1119 */
1120 static __always_inline int
1121 raw_atomic_fetch_inc_acquire(atomic_t *v)
1122 {
1123 #if defined(arch_atomic_fetch_inc_acquire)
1124 return arch_atomic_fetch_inc_acquire(v);
1125 #elif defined(arch_atomic_fetch_inc_relaxed)
1126 int ret = arch_atomic_fetch_inc_relaxed(v);
1127 __atomic_acquire_fence();
1128 return ret;
1129 #elif defined(arch_atomic_fetch_inc)
1130 return arch_atomic_fetch_inc(v);
1131 #else
1132 return raw_atomic_fetch_add_acquire(1, v);
1133 #endif
1134 }
1135
1136 /**
1137 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
1138 * @v: pointer to atomic_t
1139 *
1140 * Atomically updates @v to (@v + 1) with release ordering.
1141 *
1142 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
1143 *
1144 * Return: The original value of @v.
1145 */
1146 static __always_inline int
1147 raw_atomic_fetch_inc_release(atomic_t *v)
1148 {
1149 #if defined(arch_atomic_fetch_inc_release)
1150 return arch_atomic_fetch_inc_release(v);
1151 #elif defined(arch_atomic_fetch_inc_relaxed)
1152 __atomic_release_fence();
1153 return arch_atomic_fetch_inc_relaxed(v);
1154 #elif defined(arch_atomic_fetch_inc)
1155 return arch_atomic_fetch_inc(v);
1156 #else
1157 return raw_atomic_fetch_add_release(1, v);
1158 #endif
1159 }
1160
1161 /**
1162 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
1163 * @v: pointer to atomic_t
1164 *
1165 * Atomically updates @v to (@v + 1) with relaxed ordering.
1166 *
1167 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
1168 *
1169 * Return: The original value of @v.
1170 */
1171 static __always_inline int
1172 raw_atomic_fetch_inc_relaxed(atomic_t *v)
1173 {
1174 #if defined(arch_atomic_fetch_inc_relaxed)
1175 return arch_atomic_fetch_inc_relaxed(v);
1176 #elif defined(arch_atomic_fetch_inc)
1177 return arch_atomic_fetch_inc(v);
1178 #else
1179 return raw_atomic_fetch_add_relaxed(1, v);
1180 #endif
1181 }
1182
1183 /**
1184 * raw_atomic_dec() - atomic decrement with relaxed ordering
1185 * @v: pointer to atomic_t
1186 *
1187 * Atomically updates @v to (@v - 1) with relaxed ordering.
1188 *
1189 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
1190 *
1191 * Return: Nothing.
1192 */
1193 static __always_inline void
1194 raw_atomic_dec(atomic_t *v)
1195 {
1196 #if defined(arch_atomic_dec)
1197 arch_atomic_dec(v);
1198 #else
1199 raw_atomic_sub(1, v);
1200 #endif
1201 }
1202
1203 /**
1204 * raw_atomic_dec_return() - atomic decrement with full ordering
1205 * @v: pointer to atomic_t
1206 *
1207 * Atomically updates @v to (@v - 1) with full ordering.
1208 *
1209 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
1210 *
1211 * Return: The updated value of @v.
1212 */
1213 static __always_inline int
1214 raw_atomic_dec_return(atomic_t *v)
1215 {
1216 #if defined(arch_atomic_dec_return)
1217 return arch_atomic_dec_return(v);
1218 #elif defined(arch_atomic_dec_return_relaxed)
1219 int ret;
1220 __atomic_pre_full_fence();
1221 ret = arch_atomic_dec_return_relaxed(v);
1222 __atomic_post_full_fence();
1223 return ret;
1224 #else
1225 return raw_atomic_sub_return(1, v);
1226 #endif
1227 }
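/*
 * Usage sketch (hypothetical, simplified refcount-style pattern): the fully
 * ordered return value makes "last user tears down" decisions safe:
 *
 *	if (raw_atomic_dec_return(&obj->refs) == 0)
 *		release(obj);
 *
 * Real code should normally use atomic_dec_return() or refcount_t rather
 * than the raw_ form.
 */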
1228
1229 /**
1230 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
1231 * @v: pointer to atomic_t
1232 *
1233 * Atomically updates @v to (@v - 1) with acquire ordering.
1234 *
1235 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
1236 *
1237 * Return: The updated value of @v.
1238 */
1239 static __always_inline int
1240 raw_atomic_dec_return_acquire(atomic_t *v)
1241 {
1242 #if defined(arch_atomic_dec_return_acquire)
1243 return arch_atomic_dec_return_acquire(v);
1244 #elif defined(arch_atomic_dec_return_relaxed)
1245 int ret = arch_atomic_dec_return_relaxed(v);
1246 __atomic_acquire_fence();
1247 return ret;
1248 #elif defined(arch_atomic_dec_return)
1249 return arch_atomic_dec_return(v);
1250 #else
1251 return raw_atomic_sub_return_acquire(1, v);
1252 #endif
1253 }
1254
1255 /**
1256 * raw_atomic_dec_return_release() - atomic decrement with release ordering
1257 * @v: pointer to atomic_t
1258 *
1259 * Atomically updates @v to (@v - 1) with release ordering.
1260 *
1261 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
1262 *
1263 * Return: The updated value of @v.
1264 */
1265 static __always_inline int
1266 raw_atomic_dec_return_release(atomic_t *v)
1267 {
1268 #if defined(arch_atomic_dec_return_release)
1269 return arch_atomic_dec_return_release(v);
1270 #elif defined(arch_atomic_dec_return_relaxed)
1271 __atomic_release_fence();
1272 return arch_atomic_dec_return_relaxed(v);
1273 #elif defined(arch_atomic_dec_return)
1274 return arch_atomic_dec_return(v);
1275 #else
1276 return raw_atomic_sub_return_release(1, v);
1277 #endif
1278 }
1279
1280 /**
1281 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
1282 * @v: pointer to atomic_t
1283 *
1284 * Atomically updates @v to (@v - 1) with relaxed ordering.
1285 *
1286 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
1287 *
1288 * Return: The updated value of @v.
1289 */
1290 static __always_inline int
1291 raw_atomic_dec_return_relaxed(atomic_t *v)
1292 {
1293 #if defined(arch_atomic_dec_return_relaxed)
1294 return arch_atomic_dec_return_relaxed(v);
1295 #elif defined(arch_atomic_dec_return)
1296 return arch_atomic_dec_return(v);
1297 #else
1298 return raw_atomic_sub_return_relaxed(1, v);
1299 #endif
1300 }
1301
1302 /**
1303 * raw_atomic_fetch_dec() - atomic decrement with full ordering
1304 * @v: pointer to atomic_t
1305 *
1306 * Atomically updates @v to (@v - 1) with full ordering.
1307 *
1308 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
1309 *
1310 * Return: The original value of @v.
1311 */
1312 static __always_inline int
1313 raw_atomic_fetch_dec(atomic_t *v)
1314 {
1315 #if defined(arch_atomic_fetch_dec)
1316 return arch_atomic_fetch_dec(v);
1317 #elif defined(arch_atomic_fetch_dec_relaxed)
1318 int ret;
1319 __atomic_pre_full_fence();
1320 ret = arch_atomic_fetch_dec_relaxed(v);
1321 __atomic_post_full_fence();
1322 return ret;
1323 #else
1324 return raw_atomic_fetch_sub(1, v);
1325 #endif
1326 }
1327
1328 /**
1329 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
1330 * @v: pointer to atomic_t
1331 *
1332 * Atomically updates @v to (@v - 1) with acquire ordering.
1333 *
1334 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
1335 *
1336 * Return: The original value of @v.
1337 */
1338 static __always_inline int
1339 raw_atomic_fetch_dec_acquire(atomic_t *v)
1340 {
1341 #if defined(arch_atomic_fetch_dec_acquire)
1342 return arch_atomic_fetch_dec_acquire(v);
1343 #elif defined(arch_atomic_fetch_dec_relaxed)
1344 int ret = arch_atomic_fetch_dec_relaxed(v);
1345 __atomic_acquire_fence();
1346 return ret;
1347 #elif defined(arch_atomic_fetch_dec)
1348 return arch_atomic_fetch_dec(v);
1349 #else
1350 return raw_atomic_fetch_sub_acquire(1, v);
1351 #endif
1352 }
1353
1354 /**
1355 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
1356 * @v: pointer to atomic_t
1357 *
1358 * Atomically updates @v to (@v - 1) with release ordering.
1359 *
1360 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
1361 *
1362 * Return: The original value of @v.
1363 */
1364 static __always_inline int
1365 raw_atomic_fetch_dec_release(atomic_t *v)
1366 {
1367 #if defined(arch_atomic_fetch_dec_release)
1368 return arch_atomic_fetch_dec_release(v);
1369 #elif defined(arch_atomic_fetch_dec_relaxed)
1370 __atomic_release_fence();
1371 return arch_atomic_fetch_dec_relaxed(v);
1372 #elif defined(arch_atomic_fetch_dec)
1373 return arch_atomic_fetch_dec(v);
1374 #else
1375 return raw_atomic_fetch_sub_release(1, v);
1376 #endif
1377 }
1378
1379 /**
1380 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
1381 * @v: pointer to atomic_t
1382 *
1383 * Atomically updates @v to (@v - 1) with relaxed ordering.
1384 *
1385 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
1386 *
1387 * Return: The original value of @v.
1388 */
1389 static __always_inline int
1390 raw_atomic_fetch_dec_relaxed(atomic_t *v)
1391 {
1392 #if defined(arch_atomic_fetch_dec_relaxed)
1393 return arch_atomic_fetch_dec_relaxed(v);
1394 #elif defined(arch_atomic_fetch_dec)
1395 return arch_atomic_fetch_dec(v);
1396 #else
1397 return raw_atomic_fetch_sub_relaxed(1, v);
1398 #endif
1399 }
1400
1401 /**
1402 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
1403 * @i: int value
1404 * @v: pointer to atomic_t
1405 *
1406 * Atomically updates @v to (@v & @i) with relaxed ordering.
1407 *
1408 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
1409 *
1410 * Return: Nothing.
1411 */
1412 static __always_inline void
1413 raw_atomic_and(int i, atomic_t *v)
1414 {
1415 arch_atomic_and(i, v);
1416 }
1417
1418 /**
1419 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
1420 * @i: int value
1421 * @v: pointer to atomic_t
1422 *
1423 * Atomically updates @v to (@v & @i) with full ordering.
1424 *
1425 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
1426 *
1427 * Return: The original value of @v.
1428 */
1429 static __always_inline int
1430 raw_atomic_fetch_and(int i, atomic_t *v)
1431 {
1432 #if defined(arch_atomic_fetch_and)
1433 return arch_atomic_fetch_and(i, v);
1434 #elif defined(arch_atomic_fetch_and_relaxed)
1435 int ret;
1436 __atomic_pre_full_fence();
1437 ret = arch_atomic_fetch_and_relaxed(i, v);
1438 __atomic_post_full_fence();
1439 return ret;
1440 #else
1441 #error "Unable to define raw_atomic_fetch_and"
1442 #endif
1443 }
1444
1445 /**
1446 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
1447 * @i: int value
1448 * @v: pointer to atomic_t
1449 *
1450 * Atomically updates @v to (@v & @i) with acquire ordering.
1451 *
1452 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
1453 *
1454 * Return: The original value of @v.
1455 */
1456 static __always_inline int
1457 raw_atomic_fetch_and_acquire(int i, atomic_t *v)
1458 {
1459 #if defined(arch_atomic_fetch_and_acquire)
1460 return arch_atomic_fetch_and_acquire(i, v);
1461 #elif defined(arch_atomic_fetch_and_relaxed)
1462 int ret = arch_atomic_fetch_and_relaxed(i, v);
1463 __atomic_acquire_fence();
1464 return ret;
1465 #elif defined(arch_atomic_fetch_and)
1466 return arch_atomic_fetch_and(i, v);
1467 #else
1468 #error "Unable to define raw_atomic_fetch_and_acquire"
1469 #endif
1470 }
1471
1472 /**
1473 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
1474 * @i: int value
1475 * @v: pointer to atomic_t
1476 *
1477 * Atomically updates @v to (@v & @i) with release ordering.
1478 *
1479 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
1480 *
1481 * Return: The original value of @v.
1482 */
1483 static __always_inline int
1484 raw_atomic_fetch_and_release(int i, atomic_t *v)
1485 {
1486 #if defined(arch_atomic_fetch_and_release)
1487 return arch_atomic_fetch_and_release(i, v);
1488 #elif defined(arch_atomic_fetch_and_relaxed)
1489 __atomic_release_fence();
1490 return arch_atomic_fetch_and_relaxed(i, v);
1491 #elif defined(arch_atomic_fetch_and)
1492 return arch_atomic_fetch_and(i, v);
1493 #else
1494 #error "Unable to define raw_atomic_fetch_and_release"
1495 #endif
1496 }
1497
1498 /**
1499 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
1500 * @i: int value
1501 * @v: pointer to atomic_t
1502 *
1503 * Atomically updates @v to (@v & @i) with relaxed ordering.
1504 *
1505 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
1506 *
1507 * Return: The original value of @v.
1508 */
1509 static __always_inline int
1510 raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
1511 {
1512 #if defined(arch_atomic_fetch_and_relaxed)
1513 return arch_atomic_fetch_and_relaxed(i, v);
1514 #elif defined(arch_atomic_fetch_and)
1515 return arch_atomic_fetch_and(i, v);
1516 #else
1517 #error "Unable to define raw_atomic_fetch_and_relaxed"
1518 #endif
1519 }
1520
1521 /**
1522 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
1523 * @i: int value
1524 * @v: pointer to atomic_t
1525 *
1526 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1527 *
1528 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
1529 *
1530 * Return: Nothing.
1531 */
1532 static __always_inline void
1533 raw_atomic_andnot(int i, atomic_t *v)
1534 {
1535 #if defined(arch_atomic_andnot)
1536 arch_atomic_andnot(i, v);
1537 #else
1538 raw_atomic_and(~i, v);
1539 #endif
1540 }
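/*
 * Illustrative equivalence: clearing bits with andnot() is the same as
 * and() with the complemented mask, e.g. for a hypothetical flags word:
 *
 *	raw_atomic_andnot(BIT(busy_bit), &flags);
 *	raw_atomic_and(~BIT(busy_bit), &flags);	// equivalent
 */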
1541
1542 /**
1543 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
1544 * @i: int value
1545 * @v: pointer to atomic_t
1546 *
1547 * Atomically updates @v to (@v & ~@i) with full ordering.
1548 *
1549 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
1550 *
1551 * Return: The original value of @v.
1552 */
1553 static __always_inline int
1554 raw_atomic_fetch_andnot(int i, atomic_t *v)
1555 {
1556 #if defined(arch_atomic_fetch_andnot)
1557 return arch_atomic_fetch_andnot(i, v);
1558 #elif defined(arch_atomic_fetch_andnot_relaxed)
1559 int ret;
1560 __atomic_pre_full_fence();
1561 ret = arch_atomic_fetch_andnot_relaxed(i, v);
1562 __atomic_post_full_fence();
1563 return ret;
1564 #else
1565 return raw_atomic_fetch_and(~i, v);
1566 #endif
1567 }
1568
1569 /**
1570 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
1571 * @i: int value
1572 * @v: pointer to atomic_t
1573 *
1574 * Atomically updates @v to (@v & ~@i) with acquire ordering.
1575 *
1576 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
1577 *
1578 * Return: The original value of @v.
1579 */
1580 static __always_inline int
1581 raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1582 {
1583 #if defined(arch_atomic_fetch_andnot_acquire)
1584 return arch_atomic_fetch_andnot_acquire(i, v);
1585 #elif defined(arch_atomic_fetch_andnot_relaxed)
1586 int ret = arch_atomic_fetch_andnot_relaxed(i, v);
1587 __atomic_acquire_fence();
1588 return ret;
1589 #elif defined(arch_atomic_fetch_andnot)
1590 return arch_atomic_fetch_andnot(i, v);
1591 #else
1592 return raw_atomic_fetch_and_acquire(~i, v);
1593 #endif
1594 }
1595
1596 /**
1597 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
1598 * @i: int value
1599 * @v: pointer to atomic_t
1600 *
1601 * Atomically updates @v to (@v & ~@i) with release ordering.
1602 *
1603 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
1604 *
1605 * Return: The original value of @v.
1606 */
1607 static __always_inline int
1608 raw_atomic_fetch_andnot_release(int i, atomic_t *v)
1609 {
1610 #if defined(arch_atomic_fetch_andnot_release)
1611 return arch_atomic_fetch_andnot_release(i, v);
1612 #elif defined(arch_atomic_fetch_andnot_relaxed)
1613 __atomic_release_fence();
1614 return arch_atomic_fetch_andnot_relaxed(i, v);
1615 #elif defined(arch_atomic_fetch_andnot)
1616 return arch_atomic_fetch_andnot(i, v);
1617 #else
1618 return raw_atomic_fetch_and_release(~i, v);
1619 #endif
1620 }
1621
1622 /**
1623 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
1624 * @i: int value
1625 * @v: pointer to atomic_t
1626 *
1627 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1628 *
1629 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
1630 *
1631 * Return: The original value of @v.
1632 */
1633 static __always_inline int
1634 raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
1635 {
1636 #if defined(arch_atomic_fetch_andnot_relaxed)
1637 return arch_atomic_fetch_andnot_relaxed(i, v);
1638 #elif defined(arch_atomic_fetch_andnot)
1639 return arch_atomic_fetch_andnot(i, v);
1640 #else
1641 return raw_atomic_fetch_and_relaxed(~i, v);
1642 #endif
1643 }
1644
1645 /**
1646 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
1647 * @i: int value
1648 * @v: pointer to atomic_t
1649 *
1650 * Atomically updates @v to (@v | @i) with relaxed ordering.
1651 *
1652 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
1653 *
1654 * Return: Nothing.
1655 */
1656 static __always_inline void
1657 raw_atomic_or(int i, atomic_t *v)
1658 {
1659 arch_atomic_or(i, v);
1660 }
1661
1662 /**
1663 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
1664 * @i: int value
1665 * @v: pointer to atomic_t
1666 *
1667 * Atomically updates @v to (@v | @i) with full ordering.
1668 *
1669 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
1670 *
1671 * Return: The original value of @v.
1672 */
1673 static __always_inline int
1674 raw_atomic_fetch_or(int i, atomic_t *v)
1675 {
1676 #if defined(arch_atomic_fetch_or)
1677 return arch_atomic_fetch_or(i, v);
1678 #elif defined(arch_atomic_fetch_or_relaxed)
1679 int ret;
1680 __atomic_pre_full_fence();
1681 ret = arch_atomic_fetch_or_relaxed(i, v);
1682 __atomic_post_full_fence();
1683 return ret;
1684 #else
1685 #error "Unable to define raw_atomic_fetch_or"
1686 #endif
1687 }
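/*
 * Usage sketch (hypothetical): fetch_or() both sets a flag and reports
 * whether it was already set, which suits "only the first caller does the
 * work" patterns:
 *
 *	if (!(raw_atomic_fetch_or(PENDING, &state) & PENDING))
 *		queue_the_work();
 */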
1688
1689 /**
1690 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
1691 * @i: int value
1692 * @v: pointer to atomic_t
1693 *
1694 * Atomically updates @v to (@v | @i) with acquire ordering.
1695 *
1696 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
1697 *
1698 * Return: The original value of @v.
1699 */
1700 static __always_inline int
1701 raw_atomic_fetch_or_acquire(int i, atomic_t *v)
1702 {
1703 #if defined(arch_atomic_fetch_or_acquire)
1704 return arch_atomic_fetch_or_acquire(i, v);
1705 #elif defined(arch_atomic_fetch_or_relaxed)
1706 int ret = arch_atomic_fetch_or_relaxed(i, v);
1707 __atomic_acquire_fence();
1708 return ret;
1709 #elif defined(arch_atomic_fetch_or)
1710 return arch_atomic_fetch_or(i, v);
1711 #else
1712 #error "Unable to define raw_atomic_fetch_or_acquire"
1713 #endif
1714 }
1715
1716 /**
1717 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
1718 * @i: int value
1719 * @v: pointer to atomic_t
1720 *
1721 * Atomically updates @v to (@v | @i) with release ordering.
1722 *
1723 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
1724 *
1725 * Return: The original value of @v.
1726 */
1727 static __always_inline int
1728 raw_atomic_fetch_or_release(int i, atomic_t *v)
1729 {
1730 #if defined(arch_atomic_fetch_or_release)
1731 return arch_atomic_fetch_or_release(i, v);
1732 #elif defined(arch_atomic_fetch_or_relaxed)
1733 __atomic_release_fence();
1734 return arch_atomic_fetch_or_relaxed(i, v);
1735 #elif defined(arch_atomic_fetch_or)
1736 return arch_atomic_fetch_or(i, v);
1737 #else
1738 #error "Unable to define raw_atomic_fetch_or_release"
1739 #endif
1740 }
1741
1742 /**
1743 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
1744 * @i: int value
1745 * @v: pointer to atomic_t
1746 *
1747 * Atomically updates @v to (@v | @i) with relaxed ordering.
1748 *
1749 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
1750 *
1751 * Return: The original value of @v.
1752 */
1753 static __always_inline int
1754 raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
1755 {
1756 #if defined(arch_atomic_fetch_or_relaxed)
1757 return arch_atomic_fetch_or_relaxed(i, v);
1758 #elif defined(arch_atomic_fetch_or)
1759 return arch_atomic_fetch_or(i, v);
1760 #else
1761 #error "Unable to define raw_atomic_fetch_or_relaxed"
1762 #endif
1763 }
1764
1765 /**
1766 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
1767 * @i: int value
1768 * @v: pointer to atomic_t
1769 *
1770 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1771 *
1772 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
1773 *
1774 * Return: Nothing.
1775 */
1776 static __always_inline void
1777 raw_atomic_xor(int i, atomic_t *v)
1778 {
1779 arch_atomic_xor(i, v);
1780 }
1781
1782 /**
1783 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
1784 * @i: int value
1785 * @v: pointer to atomic_t
1786 *
1787 * Atomically updates @v to (@v ^ @i) with full ordering.
1788 *
1789 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
1790 *
1791 * Return: The original value of @v.
1792 */
1793 static __always_inline int
1794 raw_atomic_fetch_xor(int i, atomic_t *v)
1795 {
1796 #if defined(arch_atomic_fetch_xor)
1797 return arch_atomic_fetch_xor(i, v);
1798 #elif defined(arch_atomic_fetch_xor_relaxed)
1799 int ret;
1800 __atomic_pre_full_fence();
1801 ret = arch_atomic_fetch_xor_relaxed(i, v);
1802 __atomic_post_full_fence();
1803 return ret;
1804 #else
1805 #error "Unable to define raw_atomic_fetch_xor"
1806 #endif
1807 }
1808
1809 /**
1810 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
1811 * @i: int value
1812 * @v: pointer to atomic_t
1813 *
1814 * Atomically updates @v to (@v ^ @i) with acquire ordering.
1815 *
1816 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
1817 *
1818 * Return: The original value of @v.
1819 */
1820 static __always_inline int
1821 raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
1822 {
1823 #if defined(arch_atomic_fetch_xor_acquire)
1824 return arch_atomic_fetch_xor_acquire(i, v);
1825 #elif defined(arch_atomic_fetch_xor_relaxed)
1826 int ret = arch_atomic_fetch_xor_relaxed(i, v);
1827 __atomic_acquire_fence();
1828 return ret;
1829 #elif defined(arch_atomic_fetch_xor)
1830 return arch_atomic_fetch_xor(i, v);
1831 #else
1832 #error "Unable to define raw_atomic_fetch_xor_acquire"
1833 #endif
1834 }
1835
1836 /**
1837 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
1838 * @i: int value
1839 * @v: pointer to atomic_t
1840 *
1841 * Atomically updates @v to (@v ^ @i) with release ordering.
1842 *
1843 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
1844 *
1845 * Return: The original value of @v.
1846 */
1847 static __always_inline int
1848 raw_atomic_fetch_xor_release(int i, atomic_t *v)
1849 {
1850 #if defined(arch_atomic_fetch_xor_release)
1851 return arch_atomic_fetch_xor_release(i, v);
1852 #elif defined(arch_atomic_fetch_xor_relaxed)
1853 __atomic_release_fence();
1854 return arch_atomic_fetch_xor_relaxed(i, v);
1855 #elif defined(arch_atomic_fetch_xor)
1856 return arch_atomic_fetch_xor(i, v);
1857 #else
1858 #error "Unable to define raw_atomic_fetch_xor_release"
1859 #endif
1860 }
1861
1862 /**
1863 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
1864 * @i: int value
1865 * @v: pointer to atomic_t
1866 *
1867 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1868 *
1869 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
1870 *
1871 * Return: The original value of @v.
1872 */
1873 static __always_inline int
1874 raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
1875 {
1876 #if defined(arch_atomic_fetch_xor_relaxed)
1877 return arch_atomic_fetch_xor_relaxed(i, v);
1878 #elif defined(arch_atomic_fetch_xor)
1879 return arch_atomic_fetch_xor(i, v);
1880 #else
1881 #error "Unable to define raw_atomic_fetch_xor_relaxed"
1882 #endif
1883 }
1884
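/*
 * Illustrative usage sketch (hypothetical helper): raw_atomic_fetch_xor()
 * toggles the bits in the given mask and reports the state they had before
 * the toggle.
 */
static __always_inline bool example_toggle_was_set(atomic_t *state, int mask)
{
        /* Non-zero iff at least one bit of @mask was set before toggling. */
        return raw_atomic_fetch_xor(mask, state) & mask;
}
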
1885 /**
1886 * raw_atomic_xchg() - atomic exchange with full ordering
1887 * @v: pointer to atomic_t
1888 * @new: int value to assign
1889 *
1890 * Atomically updates @v to @new with full ordering.
1891 *
1892 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
1893 *
1894 * Return: The original value of @v.
1895 */
1896 static __always_inline int
1897 raw_atomic_xchg(atomic_t *v, int new)
1898 {
1899 #if defined(arch_atomic_xchg)
1900 return arch_atomic_xchg(v, new);
1901 #elif defined(arch_atomic_xchg_relaxed)
1902 int ret;
1903 __atomic_pre_full_fence();
1904 ret = arch_atomic_xchg_relaxed(v, new);
1905 __atomic_post_full_fence();
1906 return ret;
1907 #else
1908 return raw_xchg(&v->counter, new);
1909 #endif
1910 }
1911
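/*
 * Illustrative usage sketch (hypothetical helper): raw_atomic_xchg() reads
 * and replaces a value in one fully ordered step, e.g. consuming a word of
 * pending events without losing bits set by concurrent producers.
 */
static __always_inline int example_take_pending(atomic_t *pending)
{
        /* The old value is returned; the word is left at zero. */
        return raw_atomic_xchg(pending, 0);
}
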
1912 /**
1913 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
1914 * @v: pointer to atomic_t
1915 * @new: int value to assign
1916 *
1917 * Atomically updates @v to @new with acquire ordering.
1918 *
1919 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
1920 *
1921 * Return: The original value of @v.
1922 */
1923 static __always_inline int
1924 raw_atomic_xchg_acquire(atomic_t *v, int new)
1925 {
1926 #if defined(arch_atomic_xchg_acquire)
1927 return arch_atomic_xchg_acquire(v, new);
1928 #elif defined(arch_atomic_xchg_relaxed)
1929 int ret = arch_atomic_xchg_relaxed(v, new);
1930 __atomic_acquire_fence();
1931 return ret;
1932 #elif defined(arch_atomic_xchg)
1933 return arch_atomic_xchg(v, new);
1934 #else
1935 return raw_xchg_acquire(&v->counter, new);
1936 #endif
1937 }
1938
1939 /**
1940 * raw_atomic_xchg_release() - atomic exchange with release ordering
1941 * @v: pointer to atomic_t
1942 * @new: int value to assign
1943 *
1944 * Atomically updates @v to @new with release ordering.
1945 *
1946 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
1947 *
1948 * Return: The original value of @v.
1949 */
1950 static __always_inline int
1951 raw_atomic_xchg_release(atomic_t *v, int new)
1952 {
1953 #if defined(arch_atomic_xchg_release)
1954 return arch_atomic_xchg_release(v, new);
1955 #elif defined(arch_atomic_xchg_relaxed)
1956 __atomic_release_fence();
1957 return arch_atomic_xchg_relaxed(v, new);
1958 #elif defined(arch_atomic_xchg)
1959 return arch_atomic_xchg(v, new);
1960 #else
1961 return raw_xchg_release(&v->counter, new);
1962 #endif
1963 }
1964
1965 /**
1966 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
1967 * @v: pointer to atomic_t
1968 * @new: int value to assign
1969 *
1970 * Atomically updates @v to @new with relaxed ordering.
1971 *
1972 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
1973 *
1974 * Return: The original value of @v.
1975 */
1976 static __always_inline int
1977 raw_atomic_xchg_relaxed(atomic_t *v, int new)
1978 {
1979 #if defined(arch_atomic_xchg_relaxed)
1980 return arch_atomic_xchg_relaxed(v, new);
1981 #elif defined(arch_atomic_xchg)
1982 return arch_atomic_xchg(v, new);
1983 #else
1984 return raw_xchg_relaxed(&v->counter, new);
1985 #endif
1986 }
1987
1988 /**
1989 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
1990 * @v: pointer to atomic_t
1991 * @old: int value to compare with
1992 * @new: int value to assign
1993 *
1994 * If (@v == @old), atomically updates @v to @new with full ordering.
1995 *
1996 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
1997 *
1998 * Return: The original value of @v.
1999 */
2000 static __always_inline int
2001 raw_atomic_cmpxchg(atomic_t *v, int old, int new)
2002 {
2003 #if defined(arch_atomic_cmpxchg)
2004 return arch_atomic_cmpxchg(v, old, new);
2005 #elif defined(arch_atomic_cmpxchg_relaxed)
2006 int ret;
2007 __atomic_pre_full_fence();
2008 ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2009 __atomic_post_full_fence();
2010 return ret;
2011 #else
2012 return raw_cmpxchg(&v->counter, old, new);
2013 #endif
2014 }
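/*
 * Illustrative usage sketch (hypothetical helper): the classic cmpxchg()
 * retry loop re-reads the value on failure and tries again. Compare with
 * the try_cmpxchg() form below, which folds the re-read into the primitive.
 */
static __always_inline int example_inc_saturating(atomic_t *v, int limit)
{
        int old, new, prev;

        old = raw_atomic_read(v);
        for (;;) {
                new = old < limit ? old + 1 : limit;
                prev = raw_atomic_cmpxchg(v, old, new);
                if (prev == old)
                        return new;
                old = prev;     /* lost the race; retry with the fresh value */
        }
}
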
2015
2016 /**
2017 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2018 * @v: pointer to atomic_t
2019 * @old: int value to compare with
2020 * @new: int value to assign
2021 *
2022 * If (@v == @old), atomically updates @v to @new with acquire ordering.
2023 *
2024 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2025 *
2026 * Return: The original value of @v.
2027 */
2028 static __always_inline int
2029 raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2030 {
2031 #if defined(arch_atomic_cmpxchg_acquire)
2032 return arch_atomic_cmpxchg_acquire(v, old, new);
2033 #elif defined(arch_atomic_cmpxchg_relaxed)
2034 int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2035 __atomic_acquire_fence();
2036 return ret;
2037 #elif defined(arch_atomic_cmpxchg)
2038 return arch_atomic_cmpxchg(v, old, new);
2039 #else
2040 return raw_cmpxchg_acquire(&v->counter, old, new);
2041 #endif
2042 }
2043
2044 /**
2045 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2046 * @v: pointer to atomic_t
2047 * @old: int value to compare with
2048 * @new: int value to assign
2049 *
2050 * If (@v == @old), atomically updates @v to @new with release ordering.
2051 *
2052 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2053 *
2054 * Return: The original value of @v.
2055 */
2056 static __always_inline int
2057 raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2058 {
2059 #if defined(arch_atomic_cmpxchg_release)
2060 return arch_atomic_cmpxchg_release(v, old, new);
2061 #elif defined(arch_atomic_cmpxchg_relaxed)
2062 __atomic_release_fence();
2063 return arch_atomic_cmpxchg_relaxed(v, old, new);
2064 #elif defined(arch_atomic_cmpxchg)
2065 return arch_atomic_cmpxchg(v, old, new);
2066 #else
2067 return raw_cmpxchg_release(&v->counter, old, new);
2068 #endif
2069 }
2070
2071 /**
2072 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2073 * @v: pointer to atomic_t
2074 * @old: int value to compare with
2075 * @new: int value to assign
2076 *
2077 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2078 *
2079 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2080 *
2081 * Return: The original value of @v.
2082 */
2083 static __always_inline int
2084 raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
2085 {
2086 #if defined(arch_atomic_cmpxchg_relaxed)
2087 return arch_atomic_cmpxchg_relaxed(v, old, new);
2088 #elif defined(arch_atomic_cmpxchg)
2089 return arch_atomic_cmpxchg(v, old, new);
2090 #else
2091 return raw_cmpxchg_relaxed(&v->counter, old, new);
2092 #endif
2093 }
2094
2095 /**
2096 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2097 * @v: pointer to atomic_t
2098 * @old: pointer to int value to compare with
2099 * @new: int value to assign
2100 *
2101 * If (@v == @old), atomically updates @v to @new with full ordering.
2102 * Otherwise, updates @old to the current value of @v.
2103 *
2104 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2105 *
2106 * Return: @true if the exchange occurred, @false otherwise.
2107 */
2108 static __always_inline bool
2109 raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2110 {
2111 #if defined(arch_atomic_try_cmpxchg)
2112 return arch_atomic_try_cmpxchg(v, old, new);
2113 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2114 bool ret;
2115 __atomic_pre_full_fence();
2116 ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2117 __atomic_post_full_fence();
2118 return ret;
2119 #else
2120 int r, o = *old;
2121 r = raw_atomic_cmpxchg(v, o, new);
2122 if (unlikely(r != o))
2123 *old = r;
2124 return likely(r == o);
2125 #endif
2126 }
2127
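/*
 * Illustrative usage sketch (hypothetical helper): raw_atomic_try_cmpxchg()
 * updates *@old on failure, so the retry loop needs no explicit re-read.
 * This is the same pattern raw_atomic_fetch_add_unless() uses further below.
 */
static __always_inline bool example_inc_below(atomic_t *v, int max)
{
        int old = raw_atomic_read(v);

        do {
                if (old >= max)
                        return false;
        } while (!raw_atomic_try_cmpxchg(v, &old, old + 1));

        return true;
}
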
2128 /**
2129 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2130 * @v: pointer to atomic_t
2131 * @old: pointer to int value to compare with
2132 * @new: int value to assign
2133 *
2134 * If (@v == @old), atomically updates @v to @new with acquire ordering.
2135 * Otherwise, updates @old to the current value of @v.
2136 *
2137 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
2138 *
2139 * Return: @true if the exchange occurred, @false otherwise.
2140 */
2141 static __always_inline bool
2142 raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2143 {
2144 #if defined(arch_atomic_try_cmpxchg_acquire)
2145 return arch_atomic_try_cmpxchg_acquire(v, old, new);
2146 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2147 bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2148 __atomic_acquire_fence();
2149 return ret;
2150 #elif defined(arch_atomic_try_cmpxchg)
2151 return arch_atomic_try_cmpxchg(v, old, new);
2152 #else
2153 int r, o = *old;
2154 r = raw_atomic_cmpxchg_acquire(v, o, new);
2155 if (unlikely(r != o))
2156 *old = r;
2157 return likely(r == o);
2158 #endif
2159 }
2160
2161 /**
2162 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
2163 * @v: pointer to atomic_t
2164 * @old: pointer to int value to compare with
2165 * @new: int value to assign
2166 *
2167 * If (@v == @old), atomically updates @v to @new with release ordering.
2168 * Otherwise, updates @old to the current value of @v.
2169 *
2170 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
2171 *
2172 * Return: @true if the exchange occurred, @false otherwise.
2173 */
2174 static __always_inline bool
2175 raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2176 {
2177 #if defined(arch_atomic_try_cmpxchg_release)
2178 return arch_atomic_try_cmpxchg_release(v, old, new);
2179 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2180 __atomic_release_fence();
2181 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2182 #elif defined(arch_atomic_try_cmpxchg)
2183 return arch_atomic_try_cmpxchg(v, old, new);
2184 #else
2185 int r, o = *old;
2186 r = raw_atomic_cmpxchg_release(v, o, new);
2187 if (unlikely(r != o))
2188 *old = r;
2189 return likely(r == o);
2190 #endif
2191 }
2192
2193 /**
2194 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2195 * @v: pointer to atomic_t
2196 * @old: pointer to int value to compare with
2197 * @new: int value to assign
2198 *
2199 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2200 * Otherwise, updates @old to the current value of @v.
2201 *
2202 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
2203 *
2204 * Return: @true if the exchange occurred, @false otherwise.
2205 */
2206 static __always_inline bool
2207 raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
2208 {
2209 #if defined(arch_atomic_try_cmpxchg_relaxed)
2210 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2211 #elif defined(arch_atomic_try_cmpxchg)
2212 return arch_atomic_try_cmpxchg(v, old, new);
2213 #else
2214 int r, o = *old;
2215 r = raw_atomic_cmpxchg_relaxed(v, o, new);
2216 if (unlikely(r != o))
2217 *old = r;
2218 return likely(r == o);
2219 #endif
2220 }
2221
2222 /**
2223 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
2224 * @i: int value to subtract
2225 * @v: pointer to atomic_t
2226 *
2227 * Atomically updates @v to (@v - @i) with full ordering.
2228 *
2229 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
2230 *
2231 * Return: @true if the resulting value of @v is zero, @false otherwise.
2232 */
2233 static __always_inline bool
2234 raw_atomic_sub_and_test(int i, atomic_t *v)
2235 {
2236 #if defined(arch_atomic_sub_and_test)
2237 return arch_atomic_sub_and_test(i, v);
2238 #else
2239 return raw_atomic_sub_return(i, v) == 0;
2240 #endif
2241 }
2242
2243 /**
2244 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
2245 * @v: pointer to atomic_t
2246 *
2247 * Atomically updates @v to (@v - 1) with full ordering.
2248 *
2249 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
2250 *
2251 * Return: @true if the resulting value of @v is zero, @false otherwise.
2252 */
2253 static __always_inline bool
2254 raw_atomic_dec_and_test(atomic_t *v)
2255 {
2256 #if defined(arch_atomic_dec_and_test)
2257 return arch_atomic_dec_and_test(v);
2258 #else
2259 return raw_atomic_dec_return(v) == 0;
2260 #endif
2261 }
2262
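/*
 * Illustrative usage sketch (hypothetical type and helper, assuming the
 * surrounding header context): raw_atomic_dec_and_test() is the classic
 * building block for reference counting; only the caller that drops the
 * count to zero runs the teardown.
 */
struct example_obj {
        atomic_t refs;
};

static __always_inline void example_put(struct example_obj *obj,
                                        void (*release)(struct example_obj *))
{
        if (raw_atomic_dec_and_test(&obj->refs))
                release(obj);
}
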
2263 /**
2264 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
2265 * @v: pointer to atomic_t
2266 *
2267 * Atomically updates @v to (@v + 1) with full ordering.
2268 *
2269 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
2270 *
2271 * Return: @true if the resulting value of @v is zero, @false otherwise.
2272 */
2273 static __always_inline bool
2274 raw_atomic_inc_and_test(atomic_t *v)
2275 {
2276 #if defined(arch_atomic_inc_and_test)
2277 return arch_atomic_inc_and_test(v);
2278 #else
2279 return raw_atomic_inc_return(v) == 0;
2280 #endif
2281 }
2282
2283 /**
2284 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
2285 * @i: int value to add
2286 * @v: pointer to atomic_t
2287 *
2288 * Atomically updates @v to (@v + @i) with full ordering.
2289 *
2290 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
2291 *
2292 * Return: @true if the resulting value of @v is negative, @false otherwise.
2293 */
2294 static __always_inline bool
2295 raw_atomic_add_negative(int i, atomic_t *v)
2296 {
2297 #if defined(arch_atomic_add_negative)
2298 return arch_atomic_add_negative(i, v);
2299 #elif defined(arch_atomic_add_negative_relaxed)
2300 bool ret;
2301 __atomic_pre_full_fence();
2302 ret = arch_atomic_add_negative_relaxed(i, v);
2303 __atomic_post_full_fence();
2304 return ret;
2305 #else
2306 return raw_atomic_add_return(i, v) < 0;
2307 #endif
2308 }
2309
2310 /**
2311 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
2312 * @i: int value to add
2313 * @v: pointer to atomic_t
2314 *
2315 * Atomically updates @v to (@v + @i) with acquire ordering.
2316 *
2317 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
2318 *
2319 * Return: @true if the resulting value of @v is negative, @false otherwise.
2320 */
2321 static __always_inline bool
2322 raw_atomic_add_negative_acquire(int i, atomic_t *v)
2323 {
2324 #if defined(arch_atomic_add_negative_acquire)
2325 return arch_atomic_add_negative_acquire(i, v);
2326 #elif defined(arch_atomic_add_negative_relaxed)
2327 bool ret = arch_atomic_add_negative_relaxed(i, v);
2328 __atomic_acquire_fence();
2329 return ret;
2330 #elif defined(arch_atomic_add_negative)
2331 return arch_atomic_add_negative(i, v);
2332 #else
2333 return raw_atomic_add_return_acquire(i, v) < 0;
2334 #endif
2335 }
2336
2337 /**
2338 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
2339 * @i: int value to add
2340 * @v: pointer to atomic_t
2341 *
2342 * Atomically updates @v to (@v + @i) with release ordering.
2343 *
2344 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
2345 *
2346 * Return: @true if the resulting value of @v is negative, @false otherwise.
2347 */
2348 static __always_inline bool
2349 raw_atomic_add_negative_release(int i, atomic_t *v)
2350 {
2351 #if defined(arch_atomic_add_negative_release)
2352 return arch_atomic_add_negative_release(i, v);
2353 #elif defined(arch_atomic_add_negative_relaxed)
2354 __atomic_release_fence();
2355 return arch_atomic_add_negative_relaxed(i, v);
2356 #elif defined(arch_atomic_add_negative)
2357 return arch_atomic_add_negative(i, v);
2358 #else
2359 return raw_atomic_add_return_release(i, v) < 0;
2360 #endif
2361 }
2362
2363 /**
2364 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
2365 * @i: int value to add
2366 * @v: pointer to atomic_t
2367 *
2368 * Atomically updates @v to (@v + @i) with relaxed ordering.
2369 *
2370 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
2371 *
2372 * Return: @true if the resulting value of @v is negative, @false otherwise.
2373 */
2374 static __always_inline bool
2375 raw_atomic_add_negative_relaxed(int i, atomic_t *v)
2376 {
2377 #if defined(arch_atomic_add_negative_relaxed)
2378 return arch_atomic_add_negative_relaxed(i, v);
2379 #elif defined(arch_atomic_add_negative)
2380 return arch_atomic_add_negative(i, v);
2381 #else
2382 return raw_atomic_add_return_relaxed(i, v) < 0;
2383 #endif
2384 }
2385
2386 /**
2387 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
2388 * @v: pointer to atomic_t
2389 * @a: int value to add
2390 * @u: int value to compare with
2391 *
2392 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2393 *
2394 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
2395 *
2396 * Return: The original value of @v.
2397 */
2398 static __always_inline int
2399 raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
2400 {
2401 #if defined(arch_atomic_fetch_add_unless)
2402 return arch_atomic_fetch_add_unless(v, a, u);
2403 #else
2404 int c = raw_atomic_read(v);
2405
2406 do {
2407 if (unlikely(c == u))
2408 break;
2409 } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
2410
2411 return c;
2412 #endif
2413 }
2414
2415 /**
2416 * raw_atomic_add_unless() - atomic add unless value with full ordering
2417 * @v: pointer to atomic_t
2418 * @a: int value to add
2419 * @u: int value to compare with
2420 *
2421 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2422 *
2423 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
2424 *
2425 * Return: @true if @v was updated, @false otherwise.
2426 */
2427 static __always_inline bool
2428 raw_atomic_add_unless(atomic_t *v, int a, int u)
2429 {
2430 #if defined(arch_atomic_add_unless)
2431 return arch_atomic_add_unless(v, a, u);
2432 #else
2433 return raw_atomic_fetch_add_unless(v, a, u) != u;
2434 #endif
2435 }
2436
2437 /**
2438 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
2439 * @v: pointer to atomic_t
2440 *
2441 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
2442 *
2443 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
2444 *
2445 * Return: @true if @v was updated, @false otherwise.
2446 */
2447 static __always_inline bool
2448 raw_atomic_inc_not_zero(atomic_t *v)
2449 {
2450 #if defined(arch_atomic_inc_not_zero)
2451 return arch_atomic_inc_not_zero(v);
2452 #else
2453 return raw_atomic_add_unless(v, 1, 0);
2454 #endif
2455 }
2456
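/*
 * Illustrative usage sketch (hypothetical helper, reusing the made-up
 * struct example_obj above): raw_atomic_inc_not_zero() takes a reference
 * only while the object is still live; a count that has reached zero means
 * teardown has begun and must not be revived.
 */
static __always_inline bool example_tryget(struct example_obj *obj)
{
        return raw_atomic_inc_not_zero(&obj->refs);
}
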
2457 /**
2458 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
2459 * @v: pointer to atomic_t
2460 *
2461 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
2462 *
2463 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
2464 *
2465 * Return: @true if @v was updated, @false otherwise.
2466 */
2467 static __always_inline bool
2468 raw_atomic_inc_unless_negative(atomic_t *v)
2469 {
2470 #if defined(arch_atomic_inc_unless_negative)
2471 return arch_atomic_inc_unless_negative(v);
2472 #else
2473 int c = raw_atomic_read(v);
2474
2475 do {
2476 if (unlikely(c < 0))
2477 return false;
2478 } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
2479
2480 return true;
2481 #endif
2482 }
2483
2484 /**
2485 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
2486 * @v: pointer to atomic_t
2487 *
2488 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
2489 *
2490 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
2491 *
2492 * Return: @true if @v was updated, @false otherwise.
2493 */
2494 static __always_inline bool
2495 raw_atomic_dec_unless_positive(atomic_t *v)
2496 {
2497 #if defined(arch_atomic_dec_unless_positive)
2498 return arch_atomic_dec_unless_positive(v);
2499 #else
2500 int c = raw_atomic_read(v);
2501
2502 do {
2503 if (unlikely(c > 0))
2504 return false;
2505 } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
2506
2507 return true;
2508 #endif
2509 }
2510
2511 /**
2512 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
2513 * @v: pointer to atomic_t
2514 *
2515 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
2516 *
2517 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
2518 *
2519 * Return: The old value of @v minus one, regardless of whether @v was updated.
2520 */
2521 static __always_inline int
2522 raw_atomic_dec_if_positive(atomic_t *v)
2523 {
2524 #if defined(arch_atomic_dec_if_positive)
2525 return arch_atomic_dec_if_positive(v);
2526 #else
2527 int dec, c = raw_atomic_read(v);
2528
2529 do {
2530 dec = c - 1;
2531 if (unlikely(dec < 0))
2532 break;
2533 } while (!raw_atomic_try_cmpxchg(v, &c, dec));
2534
2535 return dec;
2536 #endif
2537 }
2538
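/*
 * Illustrative usage sketch (hypothetical helper): raw_atomic_dec_if_positive()
 * suits counting-semaphore style resources, where a slot is taken only if
 * one is available.
 */
static __always_inline bool example_take_slot(atomic_t *free_slots)
{
        /* A negative result means no slot was free and the count is unchanged. */
        return raw_atomic_dec_if_positive(free_slots) >= 0;
}
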
2539 #ifdef CONFIG_GENERIC_ATOMIC64
2540 #include <asm-generic/atomic64.h>
2541 #endif
2542
2543 /**
2544 * raw_atomic64_read() - atomic load with relaxed ordering
2545 * @v: pointer to atomic64_t
2546 *
2547 * Atomically loads the value of @v with relaxed ordering.
2548 *
2549 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
2550 *
2551 * Return: The value loaded from @v.
2552 */
2553 static __always_inline s64
2554 raw_atomic64_read(const atomic64_t *v)
2555 {
2556 return arch_atomic64_read(v);
2557 }
2558
2559 /**
2560 * raw_atomic64_read_acquire() - atomic load with acquire ordering
2561 * @v: pointer to atomic64_t
2562 *
2563 * Atomically loads the value of @v with acquire ordering.
2564 *
2565 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2566 *
2567 * Return: The value loaded from @v.
2568 */
2569 static __always_inline s64
2570 raw_atomic64_read_acquire(const atomic64_t *v)
2571 {
2572 #if defined(arch_atomic64_read_acquire)
2573 return arch_atomic64_read_acquire(v);
2574 #else
2575 s64 ret;
2576
2577 if (__native_word(atomic64_t)) {
2578 ret = smp_load_acquire(&(v)->counter);
2579 } else {
2580 ret = raw_atomic64_read(v);
2581 __atomic_acquire_fence();
2582 }
2583
2584 return ret;
2585 #endif
2586 }
2587
2588 /**
2589 * raw_atomic64_set() - atomic set with relaxed ordering
2590 * @v: pointer to atomic64_t
2591 * @i: s64 value to assign
2592 *
2593 * Atomically sets @v to @i with relaxed ordering.
2594 *
2595 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
2596 *
2597 * Return: Nothing.
2598 */
2599 static __always_inline void
2600 raw_atomic64_set(atomic64_t *v, s64 i)
2601 {
2602 arch_atomic64_set(v, i);
2603 }
2604
2605 /**
2606 * raw_atomic64_set_release() - atomic set with release ordering
2607 * @v: pointer to atomic64_t
2608 * @i: s64 value to assign
2609 *
2610 * Atomically sets @v to @i with release ordering.
2611 *
2612 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
2613 *
2614 * Return: Nothing.
2615 */
2616 static __always_inline void
2617 raw_atomic64_set_release(atomic64_t *v, s64 i)
2618 {
2619 #if defined(arch_atomic64_set_release)
2620 arch_atomic64_set_release(v, i);
2621 #else
2622 if (__native_word(atomic64_t)) {
2623 smp_store_release(&(v)->counter, i);
2624 } else {
2625 __atomic_release_fence();
2626 raw_atomic64_set(v, i);
2627 }
2628 #endif
2629 }
2630
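/*
 * Illustrative usage sketch (hypothetical helpers and payload): the
 * acquire/release pair forms a message-passing pattern. A writer stores the
 * payload and then publishes a flag with raw_atomic64_set_release(); a
 * reader that observes the flag via raw_atomic64_read_acquire() also
 * observes the payload.
 */
static __always_inline void example_publish(atomic64_t *flag, s64 *slot, s64 val)
{
        *slot = val;                            /* payload first */
        raw_atomic64_set_release(flag, 1);      /* then the release store */
}

static __always_inline bool example_consume(atomic64_t *flag, const s64 *slot, s64 *val)
{
        if (!raw_atomic64_read_acquire(flag))   /* acquire load of the flag */
                return false;
        *val = *slot;                           /* payload is now visible */
        return true;
}
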
2631 /**
2632 * raw_atomic64_add() - atomic add with relaxed ordering
2633 * @i: s64 value to add
2634 * @v: pointer to atomic64_t
2635 *
2636 * Atomically updates @v to (@v + @i) with relaxed ordering.
2637 *
2638 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
2639 *
2640 * Return: Nothing.
2641 */
2642 static __always_inline void
2643 raw_atomic64_add(s64 i, atomic64_t *v)
2644 {
2645 arch_atomic64_add(i, v);
2646 }
2647
2648 /**
2649 * raw_atomic64_add_return() - atomic add with full ordering
2650 * @i: s64 value to add
2651 * @v: pointer to atomic64_t
2652 *
2653 * Atomically updates @v to (@v + @i) with full ordering.
2654 *
2655 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
2656 *
2657 * Return: The updated value of @v.
2658 */
2659 static __always_inline s64
2660 raw_atomic64_add_return(s64 i, atomic64_t *v)
2661 {
2662 #if defined(arch_atomic64_add_return)
2663 return arch_atomic64_add_return(i, v);
2664 #elif defined(arch_atomic64_add_return_relaxed)
2665 s64 ret;
2666 __atomic_pre_full_fence();
2667 ret = arch_atomic64_add_return_relaxed(i, v);
2668 __atomic_post_full_fence();
2669 return ret;
2670 #else
2671 #error "Unable to define raw_atomic64_add_return"
2672 #endif
2673 }
2674
2675 /**
2676 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
2677 * @i: s64 value to add
2678 * @v: pointer to atomic64_t
2679 *
2680 * Atomically updates @v to (@v + @i) with acquire ordering.
2681 *
2682 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
2683 *
2684 * Return: The updated value of @v.
2685 */
2686 static __always_inline s64
2687 raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
2688 {
2689 #if defined(arch_atomic64_add_return_acquire)
2690 return arch_atomic64_add_return_acquire(i, v);
2691 #elif defined(arch_atomic64_add_return_relaxed)
2692 s64 ret = arch_atomic64_add_return_relaxed(i, v);
2693 __atomic_acquire_fence();
2694 return ret;
2695 #elif defined(arch_atomic64_add_return)
2696 return arch_atomic64_add_return(i, v);
2697 #else
2698 #error "Unable to define raw_atomic64_add_return_acquire"
2699 #endif
2700 }
2701
2702 /**
2703 * raw_atomic64_add_return_release() - atomic add with release ordering
2704 * @i: s64 value to add
2705 * @v: pointer to atomic64_t
2706 *
2707 * Atomically updates @v to (@v + @i) with release ordering.
2708 *
2709 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
2710 *
2711 * Return: The updated value of @v.
2712 */
2713 static __always_inline s64
2714 raw_atomic64_add_return_release(s64 i, atomic64_t *v)
2715 {
2716 #if defined(arch_atomic64_add_return_release)
2717 return arch_atomic64_add_return_release(i, v);
2718 #elif defined(arch_atomic64_add_return_relaxed)
2719 __atomic_release_fence();
2720 return arch_atomic64_add_return_relaxed(i, v);
2721 #elif defined(arch_atomic64_add_return)
2722 return arch_atomic64_add_return(i, v);
2723 #else
2724 #error "Unable to define raw_atomic64_add_return_release"
2725 #endif
2726 }
2727
2728 /**
2729 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
2730 * @i: s64 value to add
2731 * @v: pointer to atomic64_t
2732 *
2733 * Atomically updates @v to (@v + @i) with relaxed ordering.
2734 *
2735 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
2736 *
2737 * Return: The updated value of @v.
2738 */
2739 static __always_inline s64
2740 raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
2741 {
2742 #if defined(arch_atomic64_add_return_relaxed)
2743 return arch_atomic64_add_return_relaxed(i, v);
2744 #elif defined(arch_atomic64_add_return)
2745 return arch_atomic64_add_return(i, v);
2746 #else
2747 #error "Unable to define raw_atomic64_add_return_relaxed"
2748 #endif
2749 }
2750
2751 /**
2752 * raw_atomic64_fetch_add() - atomic add with full ordering
2753 * @i: s64 value to add
2754 * @v: pointer to atomic64_t
2755 *
2756 * Atomically updates @v to (@v + @i) with full ordering.
2757 *
2758 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
2759 *
2760 * Return: The original value of @v.
2761 */
2762 static __always_inline s64
2763 raw_atomic64_fetch_add(s64 i, atomic64_t *v)
2764 {
2765 #if defined(arch_atomic64_fetch_add)
2766 return arch_atomic64_fetch_add(i, v);
2767 #elif defined(arch_atomic64_fetch_add_relaxed)
2768 s64 ret;
2769 __atomic_pre_full_fence();
2770 ret = arch_atomic64_fetch_add_relaxed(i, v);
2771 __atomic_post_full_fence();
2772 return ret;
2773 #else
2774 #error "Unable to define raw_atomic64_fetch_add"
2775 #endif
2776 }
2777
2778 /**
2779 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
2780 * @i: s64 value to add
2781 * @v: pointer to atomic64_t
2782 *
2783 * Atomically updates @v to (@v + @i) with acquire ordering.
2784 *
2785 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
2786 *
2787 * Return: The original value of @v.
2788 */
2789 static __always_inline s64
2790 raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2791 {
2792 #if defined(arch_atomic64_fetch_add_acquire)
2793 return arch_atomic64_fetch_add_acquire(i, v);
2794 #elif defined(arch_atomic64_fetch_add_relaxed)
2795 s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2796 __atomic_acquire_fence();
2797 return ret;
2798 #elif defined(arch_atomic64_fetch_add)
2799 return arch_atomic64_fetch_add(i, v);
2800 #else
2801 #error "Unable to define raw_atomic64_fetch_add_acquire"
2802 #endif
2803 }
2804
2805 /**
2806 * raw_atomic64_fetch_add_release() - atomic add with release ordering
2807 * @i: s64 value to add
2808 * @v: pointer to atomic64_t
2809 *
2810 * Atomically updates @v to (@v + @i) with release ordering.
2811 *
2812 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
2813 *
2814 * Return: The original value of @v.
2815 */
2816 static __always_inline s64
2817 raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2818 {
2819 #if defined(arch_atomic64_fetch_add_release)
2820 return arch_atomic64_fetch_add_release(i, v);
2821 #elif defined(arch_atomic64_fetch_add_relaxed)
2822 __atomic_release_fence();
2823 return arch_atomic64_fetch_add_relaxed(i, v);
2824 #elif defined(arch_atomic64_fetch_add)
2825 return arch_atomic64_fetch_add(i, v);
2826 #else
2827 #error "Unable to define raw_atomic64_fetch_add_release"
2828 #endif
2829 }
2830
2831 /**
2832 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
2833 * @i: s64 value to add
2834 * @v: pointer to atomic64_t
2835 *
2836 * Atomically updates @v to (@v + @i) with relaxed ordering.
2837 *
2838 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
2839 *
2840 * Return: The original value of @v.
2841 */
2842 static __always_inline s64
2843 raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
2844 {
2845 #if defined(arch_atomic64_fetch_add_relaxed)
2846 return arch_atomic64_fetch_add_relaxed(i, v);
2847 #elif defined(arch_atomic64_fetch_add)
2848 return arch_atomic64_fetch_add(i, v);
2849 #else
2850 #error "Unable to define raw_atomic64_fetch_add_relaxed"
2851 #endif
2852 }
2853
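/*
 * Illustrative usage sketch (hypothetical helper): when only atomicity is
 * required, e.g. handing out unique 64-bit cookies whose users impose their
 * own ordering, the relaxed form avoids unnecessary fences.
 */
static __always_inline s64 example_next_cookie(atomic64_t *counter)
{
        /* Pre-increment value; every caller gets a distinct cookie. */
        return raw_atomic64_fetch_add_relaxed(1, counter);
}
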
2854 /**
2855 * raw_atomic64_sub() - atomic subtract with relaxed ordering
2856 * @i: s64 value to subtract
2857 * @v: pointer to atomic64_t
2858 *
2859 * Atomically updates @v to (@v - @i) with relaxed ordering.
2860 *
2861 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
2862 *
2863 * Return: Nothing.
2864 */
2865 static __always_inline void
2866 raw_atomic64_sub(s64 i, atomic64_t *v)
2867 {
2868 arch_atomic64_sub(i, v);
2869 }
2870
2871 /**
2872 * raw_atomic64_sub_return() - atomic subtract with full ordering
2873 * @i: s64 value to subtract
2874 * @v: pointer to atomic64_t
2875 *
2876 * Atomically updates @v to (@v - @i) with full ordering.
2877 *
2878 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
2879 *
2880 * Return: The updated value of @v.
2881 */
2882 static __always_inline s64
2883 raw_atomic64_sub_return(s64 i, atomic64_t *v)
2884 {
2885 #if defined(arch_atomic64_sub_return)
2886 return arch_atomic64_sub_return(i, v);
2887 #elif defined(arch_atomic64_sub_return_relaxed)
2888 s64 ret;
2889 __atomic_pre_full_fence();
2890 ret = arch_atomic64_sub_return_relaxed(i, v);
2891 __atomic_post_full_fence();
2892 return ret;
2893 #else
2894 #error "Unable to define raw_atomic64_sub_return"
2895 #endif
2896 }
2897
2898 /**
2899 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
2900 * @i: s64 value to subtract
2901 * @v: pointer to atomic64_t
2902 *
2903 * Atomically updates @v to (@v - @i) with acquire ordering.
2904 *
2905 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
2906 *
2907 * Return: The updated value of @v.
2908 */
2909 static __always_inline s64
2910 raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2911 {
2912 #if defined(arch_atomic64_sub_return_acquire)
2913 return arch_atomic64_sub_return_acquire(i, v);
2914 #elif defined(arch_atomic64_sub_return_relaxed)
2915 s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2916 __atomic_acquire_fence();
2917 return ret;
2918 #elif defined(arch_atomic64_sub_return)
2919 return arch_atomic64_sub_return(i, v);
2920 #else
2921 #error "Unable to define raw_atomic64_sub_return_acquire"
2922 #endif
2923 }
2924
2925 /**
2926 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
2927 * @i: s64 value to subtract
2928 * @v: pointer to atomic64_t
2929 *
2930 * Atomically updates @v to (@v - @i) with release ordering.
2931 *
2932 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
2933 *
2934 * Return: The updated value of @v.
2935 */
2936 static __always_inline s64
2937 raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
2938 {
2939 #if defined(arch_atomic64_sub_return_release)
2940 return arch_atomic64_sub_return_release(i, v);
2941 #elif defined(arch_atomic64_sub_return_relaxed)
2942 __atomic_release_fence();
2943 return arch_atomic64_sub_return_relaxed(i, v);
2944 #elif defined(arch_atomic64_sub_return)
2945 return arch_atomic64_sub_return(i, v);
2946 #else
2947 #error "Unable to define raw_atomic64_sub_return_release"
2948 #endif
2949 }
2950
2951 /**
2952 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
2953 * @i: s64 value to subtract
2954 * @v: pointer to atomic64_t
2955 *
2956 * Atomically updates @v to (@v - @i) with relaxed ordering.
2957 *
2958 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
2959 *
2960 * Return: The updated value of @v.
2961 */
2962 static __always_inline s64
2963 raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
2964 {
2965 #if defined(arch_atomic64_sub_return_relaxed)
2966 return arch_atomic64_sub_return_relaxed(i, v);
2967 #elif defined(arch_atomic64_sub_return)
2968 return arch_atomic64_sub_return(i, v);
2969 #else
2970 #error "Unable to define raw_atomic64_sub_return_relaxed"
2971 #endif
2972 }
2973
2974 /**
2975 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
2976 * @i: s64 value to subtract
2977 * @v: pointer to atomic64_t
2978 *
2979 * Atomically updates @v to (@v - @i) with full ordering.
2980 *
2981 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
2982 *
2983 * Return: The original value of @v.
2984 */
2985 static __always_inline s64
2986 raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
2987 {
2988 #if defined(arch_atomic64_fetch_sub)
2989 return arch_atomic64_fetch_sub(i, v);
2990 #elif defined(arch_atomic64_fetch_sub_relaxed)
2991 s64 ret;
2992 __atomic_pre_full_fence();
2993 ret = arch_atomic64_fetch_sub_relaxed(i, v);
2994 __atomic_post_full_fence();
2995 return ret;
2996 #else
2997 #error "Unable to define raw_atomic64_fetch_sub"
2998 #endif
2999 }
3000
3001 /**
3002 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
3003 * @i: s64 value to subtract
3004 * @v: pointer to atomic64_t
3005 *
3006 * Atomically updates @v to (@v - @i) with acquire ordering.
3007 *
3008 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
3009 *
3010 * Return: The original value of @v.
3011 */
3012 static __always_inline s64
3013 raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
3014 {
3015 #if defined(arch_atomic64_fetch_sub_acquire)
3016 return arch_atomic64_fetch_sub_acquire(i, v);
3017 #elif defined(arch_atomic64_fetch_sub_relaxed)
3018 s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3019 __atomic_acquire_fence();
3020 return ret;
3021 #elif defined(arch_atomic64_fetch_sub)
3022 return arch_atomic64_fetch_sub(i, v);
3023 #else
3024 #error "Unable to define raw_atomic64_fetch_sub_acquire"
3025 #endif
3026 }
3027
3028 /**
3029 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
3030 * @i: s64 value to subtract
3031 * @v: pointer to atomic64_t
3032 *
3033 * Atomically updates @v to (@v - @i) with release ordering.
3034 *
3035 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
3036 *
3037 * Return: The original value of @v.
3038 */
3039 static __always_inline s64
3040 raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
3041 {
3042 #if defined(arch_atomic64_fetch_sub_release)
3043 return arch_atomic64_fetch_sub_release(i, v);
3044 #elif defined(arch_atomic64_fetch_sub_relaxed)
3045 __atomic_release_fence();
3046 return arch_atomic64_fetch_sub_relaxed(i, v);
3047 #elif defined(arch_atomic64_fetch_sub)
3048 return arch_atomic64_fetch_sub(i, v);
3049 #else
3050 #error "Unable to define raw_atomic64_fetch_sub_release"
3051 #endif
3052 }
3053
3054 /**
3055 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
3056 * @i: s64 value to subtract
3057 * @v: pointer to atomic64_t
3058 *
3059 * Atomically updates @v to (@v - @i) with relaxed ordering.
3060 *
3061 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
3062 *
3063 * Return: The original value of @v.
3064 */
3065 static __always_inline s64
3066 raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
3067 {
3068 #if defined(arch_atomic64_fetch_sub_relaxed)
3069 return arch_atomic64_fetch_sub_relaxed(i, v);
3070 #elif defined(arch_atomic64_fetch_sub)
3071 return arch_atomic64_fetch_sub(i, v);
3072 #else
3073 #error "Unable to define raw_atomic64_fetch_sub_relaxed"
3074 #endif
3075 }
3076
3077 /**
3078 * raw_atomic64_inc() - atomic increment with relaxed ordering
3079 * @v: pointer to atomic64_t
3080 *
3081 * Atomically updates @v to (@v + 1) with relaxed ordering.
3082 *
3083 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
3084 *
3085 * Return: Nothing.
3086 */
3087 static __always_inline void
3088 raw_atomic64_inc(atomic64_t *v)
3089 {
3090 #if defined(arch_atomic64_inc)
3091 arch_atomic64_inc(v);
3092 #else
3093 raw_atomic64_add(1, v);
3094 #endif
3095 }
3096
3097 /**
3098 * raw_atomic64_inc_return() - atomic increment with full ordering
3099 * @v: pointer to atomic64_t
3100 *
3101 * Atomically updates @v to (@v + 1) with full ordering.
3102 *
3103 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
3104 *
3105 * Return: The updated value of @v.
3106 */
3107 static __always_inline s64
3108 raw_atomic64_inc_return(atomic64_t *v)
3109 {
3110 #if defined(arch_atomic64_inc_return)
3111 return arch_atomic64_inc_return(v);
3112 #elif defined(arch_atomic64_inc_return_relaxed)
3113 s64 ret;
3114 __atomic_pre_full_fence();
3115 ret = arch_atomic64_inc_return_relaxed(v);
3116 __atomic_post_full_fence();
3117 return ret;
3118 #else
3119 return raw_atomic64_add_return(1, v);
3120 #endif
3121 }
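/*
 * Illustrative usage sketch (hypothetical helper and threshold):
 * raw_atomic64_inc_return() is useful when the caller needs the
 * post-increment value, e.g. to detect crossing a limit with full ordering.
 */
static __always_inline bool example_track_and_check(atomic64_t *in_flight, s64 limit)
{
        return raw_atomic64_inc_return(in_flight) > limit;
}
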
3122
3123 /**
3124 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
3125 * @v: pointer to atomic64_t
3126 *
3127 * Atomically updates @v to (@v + 1) with acquire ordering.
3128 *
3129 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
3130 *
3131 * Return: The updated value of @v.
3132 */
3133 static __always_inline s64
3134 raw_atomic64_inc_return_acquire(atomic64_t *v)
3135 {
3136 #if defined(arch_atomic64_inc_return_acquire)
3137 return arch_atomic64_inc_return_acquire(v);
3138 #elif defined(arch_atomic64_inc_return_relaxed)
3139 s64 ret = arch_atomic64_inc_return_relaxed(v);
3140 __atomic_acquire_fence();
3141 return ret;
3142 #elif defined(arch_atomic64_inc_return)
3143 return arch_atomic64_inc_return(v);
3144 #else
3145 return raw_atomic64_add_return_acquire(1, v);
3146 #endif
3147 }
3148
3149 /**
3150 * raw_atomic64_inc_return_release() - atomic increment with release ordering
3151 * @v: pointer to atomic64_t
3152 *
3153 * Atomically updates @v to (@v + 1) with release ordering.
3154 *
3155 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
3156 *
3157 * Return: The updated value of @v.
3158 */
3159 static __always_inline s64
3160 raw_atomic64_inc_return_release(atomic64_t *v)
3161 {
3162 #if defined(arch_atomic64_inc_return_release)
3163 return arch_atomic64_inc_return_release(v);
3164 #elif defined(arch_atomic64_inc_return_relaxed)
3165 __atomic_release_fence();
3166 return arch_atomic64_inc_return_relaxed(v);
3167 #elif defined(arch_atomic64_inc_return)
3168 return arch_atomic64_inc_return(v);
3169 #else
3170 return raw_atomic64_add_return_release(1, v);
3171 #endif
3172 }
3173
3174 /**
3175 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
3176 * @v: pointer to atomic64_t
3177 *
3178 * Atomically updates @v to (@v + 1) with relaxed ordering.
3179 *
3180 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
3181 *
3182 * Return: The updated value of @v.
3183 */
3184 static __always_inline s64
3185 raw_atomic64_inc_return_relaxed(atomic64_t *v)
3186 {
3187 #if defined(arch_atomic64_inc_return_relaxed)
3188 return arch_atomic64_inc_return_relaxed(v);
3189 #elif defined(arch_atomic64_inc_return)
3190 return arch_atomic64_inc_return(v);
3191 #else
3192 return raw_atomic64_add_return_relaxed(1, v);
3193 #endif
3194 }
3195
3196 /**
3197 * raw_atomic64_fetch_inc() - atomic increment with full ordering
3198 * @v: pointer to atomic64_t
3199 *
3200 * Atomically updates @v to (@v + 1) with full ordering.
3201 *
3202 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
3203 *
3204 * Return: The original value of @v.
3205 */
3206 static __always_inline s64
3207 raw_atomic64_fetch_inc(atomic64_t *v)
3208 {
3209 #if defined(arch_atomic64_fetch_inc)
3210 return arch_atomic64_fetch_inc(v);
3211 #elif defined(arch_atomic64_fetch_inc_relaxed)
3212 s64 ret;
3213 __atomic_pre_full_fence();
3214 ret = arch_atomic64_fetch_inc_relaxed(v);
3215 __atomic_post_full_fence();
3216 return ret;
3217 #else
3218 return raw_atomic64_fetch_add(1, v);
3219 #endif
3220 }
3221
3222 /**
3223 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
3224 * @v: pointer to atomic64_t
3225 *
3226 * Atomically updates @v to (@v + 1) with acquire ordering.
3227 *
3228 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
3229 *
3230 * Return: The original value of @v.
3231 */
3232 static __always_inline s64
3233 raw_atomic64_fetch_inc_acquire(atomic64_t *v)
3234 {
3235 #if defined(arch_atomic64_fetch_inc_acquire)
3236 return arch_atomic64_fetch_inc_acquire(v);
3237 #elif defined(arch_atomic64_fetch_inc_relaxed)
3238 s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3239 __atomic_acquire_fence();
3240 return ret;
3241 #elif defined(arch_atomic64_fetch_inc)
3242 return arch_atomic64_fetch_inc(v);
3243 #else
3244 return raw_atomic64_fetch_add_acquire(1, v);
3245 #endif
3246 }
3247
3248 /**
3249 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
3250 * @v: pointer to atomic64_t
3251 *
3252 * Atomically updates @v to (@v + 1) with release ordering.
3253 *
3254 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
3255 *
3256 * Return: The original value of @v.
3257 */
3258 static __always_inline s64
3259 raw_atomic64_fetch_inc_release(atomic64_t *v)
3260 {
3261 #if defined(arch_atomic64_fetch_inc_release)
3262 return arch_atomic64_fetch_inc_release(v);
3263 #elif defined(arch_atomic64_fetch_inc_relaxed)
3264 __atomic_release_fence();
3265 return arch_atomic64_fetch_inc_relaxed(v);
3266 #elif defined(arch_atomic64_fetch_inc)
3267 return arch_atomic64_fetch_inc(v);
3268 #else
3269 return raw_atomic64_fetch_add_release(1, v);
3270 #endif
3271 }
3272
3273 /**
3274 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
3275 * @v: pointer to atomic64_t
3276 *
3277 * Atomically updates @v to (@v + 1) with relaxed ordering.
3278 *
3279 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
3280 *
3281 * Return: The original value of @v.
3282 */
3283 static __always_inline s64
3284 raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
3285 {
3286 #if defined(arch_atomic64_fetch_inc_relaxed)
3287 return arch_atomic64_fetch_inc_relaxed(v);
3288 #elif defined(arch_atomic64_fetch_inc)
3289 return arch_atomic64_fetch_inc(v);
3290 #else
3291 return raw_atomic64_fetch_add_relaxed(1, v);
3292 #endif
3293 }
3294
3295 /**
3296 * raw_atomic64_dec() - atomic decrement with relaxed ordering
3297 * @v: pointer to atomic64_t
3298 *
3299 * Atomically updates @v to (@v - 1) with relaxed ordering.
3300 *
3301 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
3302 *
3303 * Return: Nothing.
3304 */
3305 static __always_inline void
3306 raw_atomic64_dec(atomic64_t *v)
3307 {
3308 #if defined(arch_atomic64_dec)
3309 arch_atomic64_dec(v);
3310 #else
3311 raw_atomic64_sub(1, v);
3312 #endif
3313 }
3314
3315 /**
3316 * raw_atomic64_dec_return() - atomic decrement with full ordering
3317 * @v: pointer to atomic64_t
3318 *
3319 * Atomically updates @v to (@v - 1) with full ordering.
3320 *
3321 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
3322 *
3323 * Return: The updated value of @v.
3324 */
3325 static __always_inline s64
3326 raw_atomic64_dec_return(atomic64_t *v)
3327 {
3328 #if defined(arch_atomic64_dec_return)
3329 return arch_atomic64_dec_return(v);
3330 #elif defined(arch_atomic64_dec_return_relaxed)
3331 s64 ret;
3332 __atomic_pre_full_fence();
3333 ret = arch_atomic64_dec_return_relaxed(v);
3334 __atomic_post_full_fence();
3335 return ret;
3336 #else
3337 return raw_atomic64_sub_return(1, v);
3338 #endif
3339 }
3340
3341 /**
3342 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
3343 * @v: pointer to atomic64_t
3344 *
3345 * Atomically updates @v to (@v - 1) with acquire ordering.
3346 *
3347 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
3348 *
3349 * Return: The updated value of @v.
3350 */
3351 static __always_inline s64
3352 raw_atomic64_dec_return_acquire(atomic64_t *v)
3353 {
3354 #if defined(arch_atomic64_dec_return_acquire)
3355 return arch_atomic64_dec_return_acquire(v);
3356 #elif defined(arch_atomic64_dec_return_relaxed)
3357 s64 ret = arch_atomic64_dec_return_relaxed(v);
3358 __atomic_acquire_fence();
3359 return ret;
3360 #elif defined(arch_atomic64_dec_return)
3361 return arch_atomic64_dec_return(v);
3362 #else
3363 return raw_atomic64_sub_return_acquire(1, v);
3364 #endif
3365 }
3366
3367 /**
3368 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
3369 * @v: pointer to atomic64_t
3370 *
3371 * Atomically updates @v to (@v - 1) with release ordering.
3372 *
3373 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
3374 *
3375 * Return: The updated value of @v.
3376 */
3377 static __always_inline s64
3378 raw_atomic64_dec_return_release(atomic64_t *v)
3379 {
3380 #if defined(arch_atomic64_dec_return_release)
3381 return arch_atomic64_dec_return_release(v);
3382 #elif defined(arch_atomic64_dec_return_relaxed)
3383 __atomic_release_fence();
3384 return arch_atomic64_dec_return_relaxed(v);
3385 #elif defined(arch_atomic64_dec_return)
3386 return arch_atomic64_dec_return(v);
3387 #else
3388 return raw_atomic64_sub_return_release(1, v);
3389 #endif
3390 }
3391
3392 /**
3393 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
3394 * @v: pointer to atomic64_t
3395 *
3396 * Atomically updates @v to (@v - 1) with relaxed ordering.
3397 *
3398 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
3399 *
3400 * Return: The updated value of @v.
3401 */
3402 static __always_inline s64
3403 raw_atomic64_dec_return_relaxed(atomic64_t *v)
3404 {
3405 #if defined(arch_atomic64_dec_return_relaxed)
3406 return arch_atomic64_dec_return_relaxed(v);
3407 #elif defined(arch_atomic64_dec_return)
3408 return arch_atomic64_dec_return(v);
3409 #else
3410 return raw_atomic64_sub_return_relaxed(1, v);
3411 #endif
3412 }
3413
3414 /**
3415 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
3416 * @v: pointer to atomic64_t
3417 *
3418 * Atomically updates @v to (@v - 1) with full ordering.
3419 *
3420 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
3421 *
3422 * Return: The original value of @v.
3423 */
3424 static __always_inline s64
3425 raw_atomic64_fetch_dec(atomic64_t *v)
3426 {
3427 #if defined(arch_atomic64_fetch_dec)
3428 return arch_atomic64_fetch_dec(v);
3429 #elif defined(arch_atomic64_fetch_dec_relaxed)
3430 s64 ret;
3431 __atomic_pre_full_fence();
3432 ret = arch_atomic64_fetch_dec_relaxed(v);
3433 __atomic_post_full_fence();
3434 return ret;
3435 #else
3436 return raw_atomic64_fetch_sub(1, v);
3437 #endif
3438 }
3439
3440 /**
3441 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
3442 * @v: pointer to atomic64_t
3443 *
3444 * Atomically updates @v to (@v - 1) with acquire ordering.
3445 *
3446 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
3447 *
3448 * Return: The original value of @v.
3449 */
3450 static __always_inline s64
3451 raw_atomic64_fetch_dec_acquire(atomic64_t *v)
3452 {
3453 #if defined(arch_atomic64_fetch_dec_acquire)
3454 return arch_atomic64_fetch_dec_acquire(v);
3455 #elif defined(arch_atomic64_fetch_dec_relaxed)
3456 s64 ret = arch_atomic64_fetch_dec_relaxed(v);
3457 __atomic_acquire_fence();
3458 return ret;
3459 #elif defined(arch_atomic64_fetch_dec)
3460 return arch_atomic64_fetch_dec(v);
3461 #else
3462 return raw_atomic64_fetch_sub_acquire(1, v);
3463 #endif
3464 }
3465
3466 /**
3467 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
3468 * @v: pointer to atomic64_t
3469 *
3470 * Atomically updates @v to (@v - 1) with release ordering.
3471 *
3472 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
3473 *
3474 * Return: The original value of @v.
3475 */
3476 static __always_inline s64
3477 raw_atomic64_fetch_dec_release(atomic64_t *v)
3478 {
3479 #if defined(arch_atomic64_fetch_dec_release)
3480 return arch_atomic64_fetch_dec_release(v);
3481 #elif defined(arch_atomic64_fetch_dec_relaxed)
3482 __atomic_release_fence();
3483 return arch_atomic64_fetch_dec_relaxed(v);
3484 #elif defined(arch_atomic64_fetch_dec)
3485 return arch_atomic64_fetch_dec(v);
3486 #else
3487 return raw_atomic64_fetch_sub_release(1, v);
3488 #endif
3489 }
3490
3491 /**
3492 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
3493 * @v: pointer to atomic64_t
3494 *
3495 * Atomically updates @v to (@v - 1) with relaxed ordering.
3496 *
3497 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
3498 *
3499 * Return: The original value of @v.
3500 */
3501 static __always_inline s64
3502 raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
3503 {
3504 #if defined(arch_atomic64_fetch_dec_relaxed)
3505 return arch_atomic64_fetch_dec_relaxed(v);
3506 #elif defined(arch_atomic64_fetch_dec)
3507 return arch_atomic64_fetch_dec(v);
3508 #else
3509 return raw_atomic64_fetch_sub_relaxed(1, v);
3510 #endif
3511 }
3512
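/*
 * An illustrative sketch, not part of the generated API above: a typical
 * caller of raw_atomic64_fetch_dec() dropping a reference and detecting
 * the final put. The helper name example_atomic64_put() is hypothetical
 * and exists only for this example.
 */
static __always_inline bool
example_atomic64_put(atomic64_t *refs)
{
	/* fetch_dec returns the value *before* the decrement */
	return raw_atomic64_fetch_dec(refs) == 1;
}
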
3513 /**
3514 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
3515 * @i: s64 value
3516 * @v: pointer to atomic64_t
3517 *
3518 * Atomically updates @v to (@v & @i) with relaxed ordering.
3519 *
3520 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
3521 *
3522 * Return: Nothing.
3523 */
3524 static __always_inline void
3525 raw_atomic64_and(s64 i, atomic64_t *v)
3526 {
3527 arch_atomic64_and(i, v);
3528 }
3529
3530 /**
3531 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
3532 * @i: s64 value
3533 * @v: pointer to atomic64_t
3534 *
3535 * Atomically updates @v to (@v & @i) with full ordering.
3536 *
3537 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
3538 *
3539 * Return: The original value of @v.
3540 */
3541 static __always_inline s64
3542 raw_atomic64_fetch_and(s64 i, atomic64_t *v)
3543 {
3544 #if defined(arch_atomic64_fetch_and)
3545 return arch_atomic64_fetch_and(i, v);
3546 #elif defined(arch_atomic64_fetch_and_relaxed)
3547 s64 ret;
3548 __atomic_pre_full_fence();
3549 ret = arch_atomic64_fetch_and_relaxed(i, v);
3550 __atomic_post_full_fence();
3551 return ret;
3552 #else
3553 #error "Unable to define raw_atomic64_fetch_and"
3554 #endif
3555 }
3556
3557 /**
3558 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
3559 * @i: s64 value
3560 * @v: pointer to atomic64_t
3561 *
3562 * Atomically updates @v to (@v & @i) with acquire ordering.
3563 *
3564 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
3565 *
3566 * Return: The original value of @v.
3567 */
3568 static __always_inline s64
3569 raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3570 {
3571 #if defined(arch_atomic64_fetch_and_acquire)
3572 return arch_atomic64_fetch_and_acquire(i, v);
3573 #elif defined(arch_atomic64_fetch_and_relaxed)
3574 s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3575 __atomic_acquire_fence();
3576 return ret;
3577 #elif defined(arch_atomic64_fetch_and)
3578 return arch_atomic64_fetch_and(i, v);
3579 #else
3580 #error "Unable to define raw_atomic64_fetch_and_acquire"
3581 #endif
3582 }
3583
3584 /**
3585 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
3586 * @i: s64 value
3587 * @v: pointer to atomic64_t
3588 *
3589 * Atomically updates @v to (@v & @i) with release ordering.
3590 *
3591 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
3592 *
3593 * Return: The original value of @v.
3594 */
3595 static __always_inline s64
3596 raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3597 {
3598 #if defined(arch_atomic64_fetch_and_release)
3599 return arch_atomic64_fetch_and_release(i, v);
3600 #elif defined(arch_atomic64_fetch_and_relaxed)
3601 __atomic_release_fence();
3602 return arch_atomic64_fetch_and_relaxed(i, v);
3603 #elif defined(arch_atomic64_fetch_and)
3604 return arch_atomic64_fetch_and(i, v);
3605 #else
3606 #error "Unable to define raw_atomic64_fetch_and_release"
3607 #endif
3608 }
3609
3610 /**
3611 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
3612 * @i: s64 value
3613 * @v: pointer to atomic64_t
3614 *
3615 * Atomically updates @v to (@v & @i) with relaxed ordering.
3616 *
3617 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
3618 *
3619 * Return: The original value of @v.
3620 */
3621 static __always_inline s64
3622 raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
3623 {
3624 #if defined(arch_atomic64_fetch_and_relaxed)
3625 return arch_atomic64_fetch_and_relaxed(i, v);
3626 #elif defined(arch_atomic64_fetch_and)
3627 return arch_atomic64_fetch_and(i, v);
3628 #else
3629 #error "Unable to define raw_atomic64_fetch_and_relaxed"
3630 #endif
3631 }
3632
3633 /**
3634 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
3635 * @i: s64 value
3636 * @v: pointer to atomic64_t
3637 *
3638 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3639 *
3640 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
3641 *
3642 * Return: Nothing.
3643 */
3644 static __always_inline void
3645 raw_atomic64_andnot(s64 i, atomic64_t *v)
3646 {
3647 #if defined(arch_atomic64_andnot)
3648 arch_atomic64_andnot(i, v);
3649 #else
3650 raw_atomic64_and(~i, v);
3651 #endif
3652 }
3653
3654 /**
3655 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
3656 * @i: s64 value
3657 * @v: pointer to atomic64_t
3658 *
3659 * Atomically updates @v to (@v & ~@i) with full ordering.
3660 *
3661 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
3662 *
3663 * Return: The original value of @v.
3664 */
3665 static __always_inline s64
3666 raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3667 {
3668 #if defined(arch_atomic64_fetch_andnot)
3669 return arch_atomic64_fetch_andnot(i, v);
3670 #elif defined(arch_atomic64_fetch_andnot_relaxed)
3671 s64 ret;
3672 __atomic_pre_full_fence();
3673 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3674 __atomic_post_full_fence();
3675 return ret;
3676 #else
3677 return raw_atomic64_fetch_and(~i, v);
3678 #endif
3679 }
3680
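/*
 * An illustrative sketch, not part of the generated API above: clearing
 * a set of flag bits with raw_atomic64_fetch_andnot() and reporting
 * whether any of them were previously set. The helper name
 * example_atomic64_clear_flags() is hypothetical.
 */
static __always_inline bool
example_atomic64_clear_flags(atomic64_t *flags, s64 mask)
{
	/* fetch_andnot returns the value before the bits were cleared */
	return (raw_atomic64_fetch_andnot(mask, flags) & mask) != 0;
}
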
3681 /**
3682 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
3683 * @i: s64 value
3684 * @v: pointer to atomic64_t
3685 *
3686 * Atomically updates @v to (@v & ~@i) with acquire ordering.
3687 *
3688 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
3689 *
3690 * Return: The original value of @v.
3691 */
3692 static __always_inline s64
3693 raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
3694 {
3695 #if defined(arch_atomic64_fetch_andnot_acquire)
3696 return arch_atomic64_fetch_andnot_acquire(i, v);
3697 #elif defined(arch_atomic64_fetch_andnot_relaxed)
3698 s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3699 __atomic_acquire_fence();
3700 return ret;
3701 #elif defined(arch_atomic64_fetch_andnot)
3702 return arch_atomic64_fetch_andnot(i, v);
3703 #else
3704 return raw_atomic64_fetch_and_acquire(~i, v);
3705 #endif
3706 }
3707
3708 /**
3709 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
3710 * @i: s64 value
3711 * @v: pointer to atomic64_t
3712 *
3713 * Atomically updates @v to (@v & ~@i) with release ordering.
3714 *
3715 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
3716 *
3717 * Return: The original value of @v.
3718 */
3719 static __always_inline s64
3720 raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3721 {
3722 #if defined(arch_atomic64_fetch_andnot_release)
3723 return arch_atomic64_fetch_andnot_release(i, v);
3724 #elif defined(arch_atomic64_fetch_andnot_relaxed)
3725 __atomic_release_fence();
3726 return arch_atomic64_fetch_andnot_relaxed(i, v);
3727 #elif defined(arch_atomic64_fetch_andnot)
3728 return arch_atomic64_fetch_andnot(i, v);
3729 #else
3730 return raw_atomic64_fetch_and_release(~i, v);
3731 #endif
3732 }
3733
3734 /**
3735 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
3736 * @i: s64 value
3737 * @v: pointer to atomic64_t
3738 *
3739 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3740 *
3741 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
3742 *
3743 * Return: The original value of @v.
3744 */
3745 static __always_inline s64
3746 raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
3747 {
3748 #if defined(arch_atomic64_fetch_andnot_relaxed)
3749 return arch_atomic64_fetch_andnot_relaxed(i, v);
3750 #elif defined(arch_atomic64_fetch_andnot)
3751 return arch_atomic64_fetch_andnot(i, v);
3752 #else
3753 return raw_atomic64_fetch_and_relaxed(~i, v);
3754 #endif
3755 }
3756
3757 /**
3758 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
3759 * @i: s64 value
3760 * @v: pointer to atomic64_t
3761 *
3762 * Atomically updates @v to (@v | @i) with relaxed ordering.
3763 *
3764 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
3765 *
3766 * Return: Nothing.
3767 */
3768 static __always_inline void
3769 raw_atomic64_or(s64 i, atomic64_t *v)
3770 {
3771 arch_atomic64_or(i, v);
3772 }
3773
3774 /**
3775 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
3776 * @i: s64 value
3777 * @v: pointer to atomic64_t
3778 *
3779 * Atomically updates @v to (@v | @i) with full ordering.
3780 *
3781 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
3782 *
3783 * Return: The original value of @v.
3784 */
3785 static __always_inline s64
3786 raw_atomic64_fetch_or(s64 i, atomic64_t *v)
3787 {
3788 #if defined(arch_atomic64_fetch_or)
3789 return arch_atomic64_fetch_or(i, v);
3790 #elif defined(arch_atomic64_fetch_or_relaxed)
3791 s64 ret;
3792 __atomic_pre_full_fence();
3793 ret = arch_atomic64_fetch_or_relaxed(i, v);
3794 __atomic_post_full_fence();
3795 return ret;
3796 #else
3797 #error "Unable to define raw_atomic64_fetch_or"
3798 #endif
3799 }
3800
3801 /**
3802 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
3803 * @i: s64 value
3804 * @v: pointer to atomic64_t
3805 *
3806 * Atomically updates @v to (@v | @i) with acquire ordering.
3807 *
3808 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
3809 *
3810 * Return: The original value of @v.
3811 */
3812 static __always_inline s64
3813 raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3814 {
3815 #if defined(arch_atomic64_fetch_or_acquire)
3816 return arch_atomic64_fetch_or_acquire(i, v);
3817 #elif defined(arch_atomic64_fetch_or_relaxed)
3818 s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3819 __atomic_acquire_fence();
3820 return ret;
3821 #elif defined(arch_atomic64_fetch_or)
3822 return arch_atomic64_fetch_or(i, v);
3823 #else
3824 #error "Unable to define raw_atomic64_fetch_or_acquire"
3825 #endif
3826 }
3827
3828 /**
3829 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
3830 * @i: s64 value
3831 * @v: pointer to atomic64_t
3832 *
3833 * Atomically updates @v to (@v | @i) with release ordering.
3834 *
3835 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
3836 *
3837 * Return: The original value of @v.
3838 */
3839 static __always_inline s64
3840 raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3841 {
3842 #if defined(arch_atomic64_fetch_or_release)
3843 return arch_atomic64_fetch_or_release(i, v);
3844 #elif defined(arch_atomic64_fetch_or_relaxed)
3845 __atomic_release_fence();
3846 return arch_atomic64_fetch_or_relaxed(i, v);
3847 #elif defined(arch_atomic64_fetch_or)
3848 return arch_atomic64_fetch_or(i, v);
3849 #else
3850 #error "Unable to define raw_atomic64_fetch_or_release"
3851 #endif
3852 }
3853
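/*
 * An illustrative sketch, not part of the generated API above: using
 * raw_atomic64_fetch_or_release() to publish completion. Stores made
 * before the call cannot be reordered past the point where @mask
 * becomes visible in @state. The helper name is hypothetical.
 */
static __always_inline void
example_atomic64_set_flags_release(atomic64_t *state, s64 mask)
{
	/* The fetched old value is deliberately ignored here */
	raw_atomic64_fetch_or_release(mask, state);
}
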
3854 /**
3855 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
3856 * @i: s64 value
3857 * @v: pointer to atomic64_t
3858 *
3859 * Atomically updates @v to (@v | @i) with relaxed ordering.
3860 *
3861 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
3862 *
3863 * Return: The original value of @v.
3864 */
3865 static __always_inline s64
3866 raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
3867 {
3868 #if defined(arch_atomic64_fetch_or_relaxed)
3869 return arch_atomic64_fetch_or_relaxed(i, v);
3870 #elif defined(arch_atomic64_fetch_or)
3871 return arch_atomic64_fetch_or(i, v);
3872 #else
3873 #error "Unable to define raw_atomic64_fetch_or_relaxed"
3874 #endif
3875 }
3876
3877 /**
3878 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
3879 * @i: s64 value
3880 * @v: pointer to atomic64_t
3881 *
3882 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
3883 *
3884 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
3885 *
3886 * Return: Nothing.
3887 */
3888 static __always_inline void
3889 raw_atomic64_xor(s64 i, atomic64_t *v)
3890 {
3891 arch_atomic64_xor(i, v);
3892 }
3893
3894 /**
3895 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
3896 * @i: s64 value
3897 * @v: pointer to atomic64_t
3898 *
3899 * Atomically updates @v to (@v ^ @i) with full ordering.
3900 *
3901 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
3902 *
3903 * Return: The original value of @v.
3904 */
3905 static __always_inline s64
3906 raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
3907 {
3908 #if defined(arch_atomic64_fetch_xor)
3909 return arch_atomic64_fetch_xor(i, v);
3910 #elif defined(arch_atomic64_fetch_xor_relaxed)
3911 s64 ret;
3912 __atomic_pre_full_fence();
3913 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3914 __atomic_post_full_fence();
3915 return ret;
3916 #else
3917 #error "Unable to define raw_atomic64_fetch_xor"
3918 #endif
3919 }
3920
3921 /**
3922 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
3923 * @i: s64 value
3924 * @v: pointer to atomic64_t
3925 *
3926 * Atomically updates @v to (@v ^ @i) with acquire ordering.
3927 *
3928 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
3929 *
3930 * Return: The original value of @v.
3931 */
3932 static __always_inline s64
3933 raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
3934 {
3935 #if defined(arch_atomic64_fetch_xor_acquire)
3936 return arch_atomic64_fetch_xor_acquire(i, v);
3937 #elif defined(arch_atomic64_fetch_xor_relaxed)
3938 s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3939 __atomic_acquire_fence();
3940 return ret;
3941 #elif defined(arch_atomic64_fetch_xor)
3942 return arch_atomic64_fetch_xor(i, v);
3943 #else
3944 #error "Unable to define raw_atomic64_fetch_xor_acquire"
3945 #endif
3946 }
3947
3948 /**
3949 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
3950 * @i: s64 value
3951 * @v: pointer to atomic64_t
3952 *
3953 * Atomically updates @v to (@v ^ @i) with release ordering.
3954 *
3955 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
3956 *
3957 * Return: The original value of @v.
3958 */
3959 static __always_inline s64
3960 raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
3961 {
3962 #if defined(arch_atomic64_fetch_xor_release)
3963 return arch_atomic64_fetch_xor_release(i, v);
3964 #elif defined(arch_atomic64_fetch_xor_relaxed)
3965 __atomic_release_fence();
3966 return arch_atomic64_fetch_xor_relaxed(i, v);
3967 #elif defined(arch_atomic64_fetch_xor)
3968 return arch_atomic64_fetch_xor(i, v);
3969 #else
3970 #error "Unable to define raw_atomic64_fetch_xor_release"
3971 #endif
3972 }
3973
3974 /**
3975 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
3976 * @i: s64 value
3977 * @v: pointer to atomic64_t
3978 *
3979 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
3980 *
3981 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
3982 *
3983 * Return: The original value of @v.
3984 */
3985 static __always_inline s64
3986 raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
3987 {
3988 #if defined(arch_atomic64_fetch_xor_relaxed)
3989 return arch_atomic64_fetch_xor_relaxed(i, v);
3990 #elif defined(arch_atomic64_fetch_xor)
3991 return arch_atomic64_fetch_xor(i, v);
3992 #else
3993 #error "Unable to define raw_atomic64_fetch_xor_relaxed"
3994 #endif
3995 }
3996
3997 /**
3998 * raw_atomic64_xchg() - atomic exchange with full ordering
3999 * @v: pointer to atomic64_t
4000 * @new: s64 value to assign
4001 *
4002 * Atomically updates @v to @new with full ordering.
4003 *
4004 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
4005 *
4006 * Return: The original value of @v.
4007 */
4008 static __always_inline s64
4009 raw_atomic64_xchg(atomic64_t *v, s64 new)
4010 {
4011 #if defined(arch_atomic64_xchg)
4012 return arch_atomic64_xchg(v, new);
4013 #elif defined(arch_atomic64_xchg_relaxed)
4014 s64 ret;
4015 __atomic_pre_full_fence();
4016 ret = arch_atomic64_xchg_relaxed(v, new);
4017 __atomic_post_full_fence();
4018 return ret;
4019 #else
4020 return raw_xchg(&v->counter, new);
4021 #endif
4022 }
4023
4024 /**
4025 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
4026 * @v: pointer to atomic64_t
4027 * @new: s64 value to assign
4028 *
4029 * Atomically updates @v to @new with acquire ordering.
4030 *
4031 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
4032 *
4033 * Return: The original value of @v.
4034 */
4035 static __always_inline s64
4036 raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
4037 {
4038 #if defined(arch_atomic64_xchg_acquire)
4039 return arch_atomic64_xchg_acquire(v, new);
4040 #elif defined(arch_atomic64_xchg_relaxed)
4041 s64 ret = arch_atomic64_xchg_relaxed(v, new);
4042 __atomic_acquire_fence();
4043 return ret;
4044 #elif defined(arch_atomic64_xchg)
4045 return arch_atomic64_xchg(v, new);
4046 #else
4047 return raw_xchg_acquire(&v->counter, new);
4048 #endif
4049 }
4050
4051 /**
4052 * raw_atomic64_xchg_release() - atomic exchange with release ordering
4053 * @v: pointer to atomic64_t
4054 * @new: s64 value to assign
4055 *
4056 * Atomically updates @v to @new with release ordering.
4057 *
4058 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
4059 *
4060 * Return: The original value of @v.
4061 */
4062 static __always_inline s64
4063 raw_atomic64_xchg_release(atomic64_t *v, s64 new)
4064 {
4065 #if defined(arch_atomic64_xchg_release)
4066 return arch_atomic64_xchg_release(v, new);
4067 #elif defined(arch_atomic64_xchg_relaxed)
4068 __atomic_release_fence();
4069 return arch_atomic64_xchg_relaxed(v, new);
4070 #elif defined(arch_atomic64_xchg)
4071 return arch_atomic64_xchg(v, new);
4072 #else
4073 return raw_xchg_release(&v->counter, new);
4074 #endif
4075 }
4076
4077 /**
4078 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
4079 * @v: pointer to atomic64_t
4080 * @new: s64 value to assign
4081 *
4082 * Atomically updates @v to @new with relaxed ordering.
4083 *
4084 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
4085 *
4086 * Return: The original value of @v.
4087 */
4088 static __always_inline s64
4089 raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
4090 {
4091 #if defined(arch_atomic64_xchg_relaxed)
4092 return arch_atomic64_xchg_relaxed(v, new);
4093 #elif defined(arch_atomic64_xchg)
4094 return arch_atomic64_xchg(v, new);
4095 #else
4096 return raw_xchg_relaxed(&v->counter, new);
4097 #endif
4098 }
4099
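/*
 * An illustrative sketch, not part of the generated API above: using
 * raw_atomic64_xchg() to consume a pending count in one atomic step,
 * leaving zero behind. The helper name is hypothetical.
 */
static __always_inline s64
example_atomic64_take_pending(atomic64_t *pending)
{
	/* The read of the old value and the store of 0 are one atomic op */
	return raw_atomic64_xchg(pending, 0);
}
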
4100 /**
4101 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
4102 * @v: pointer to atomic64_t
4103 * @old: s64 value to compare with
4104 * @new: s64 value to assign
4105 *
4106 * If (@v == @old), atomically updates @v to @new with full ordering.
4107 *
4108 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
4109 *
4110 * Return: The original value of @v.
4111 */
4112 static __always_inline s64
4113 raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4114 {
4115 #if defined(arch_atomic64_cmpxchg)
4116 return arch_atomic64_cmpxchg(v, old, new);
4117 #elif defined(arch_atomic64_cmpxchg_relaxed)
4118 s64 ret;
4119 __atomic_pre_full_fence();
4120 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4121 __atomic_post_full_fence();
4122 return ret;
4123 #else
4124 return raw_cmpxchg(&v->counter, old, new);
4125 #endif
4126 }
4127
4128 /**
4129 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4130 * @v: pointer to atomic64_t
4131 * @old: s64 value to compare with
4132 * @new: s64 value to assign
4133 *
4134 * If (@v == @old), atomically updates @v to @new with acquire ordering.
4135 *
4136 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
4137 *
4138 * Return: The original value of @v.
4139 */
4140 static __always_inline s64
4141 raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
4142 {
4143 #if defined(arch_atomic64_cmpxchg_acquire)
4144 return arch_atomic64_cmpxchg_acquire(v, old, new);
4145 #elif defined(arch_atomic64_cmpxchg_relaxed)
4146 s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4147 __atomic_acquire_fence();
4148 return ret;
4149 #elif defined(arch_atomic64_cmpxchg)
4150 return arch_atomic64_cmpxchg(v, old, new);
4151 #else
4152 return raw_cmpxchg_acquire(&v->counter, old, new);
4153 #endif
4154 }
4155
4156 /**
4157 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
4158 * @v: pointer to atomic64_t
4159 * @old: s64 value to compare with
4160 * @new: s64 value to assign
4161 *
4162 * If (@v == @old), atomically updates @v to @new with release ordering.
4163 *
4164 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
4165 *
4166 * Return: The original value of @v.
4167 */
4168 static __always_inline s64
4169 raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
4170 {
4171 #if defined(arch_atomic64_cmpxchg_release)
4172 return arch_atomic64_cmpxchg_release(v, old, new);
4173 #elif defined(arch_atomic64_cmpxchg_relaxed)
4174 __atomic_release_fence();
4175 return arch_atomic64_cmpxchg_relaxed(v, old, new);
4176 #elif defined(arch_atomic64_cmpxchg)
4177 return arch_atomic64_cmpxchg(v, old, new);
4178 #else
4179 return raw_cmpxchg_release(&v->counter, old, new);
4180 #endif
4181 }
4182
4183 /**
4184 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4185 * @v: pointer to atomic64_t
4186 * @old: s64 value to compare with
4187 * @new: s64 value to assign
4188 *
4189 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
4190 *
4191 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
4192 *
4193 * Return: The original value of @v.
4194 */
4195 static __always_inline s64
4196 raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
4197 {
4198 #if defined(arch_atomic64_cmpxchg_relaxed)
4199 return arch_atomic64_cmpxchg_relaxed(v, old, new);
4200 #elif defined(arch_atomic64_cmpxchg)
4201 return arch_atomic64_cmpxchg(v, old, new);
4202 #else
4203 return raw_cmpxchg_relaxed(&v->counter, old, new);
4204 #endif
4205 }
4206
4207 /**
4208 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
4209 * @v: pointer to atomic64_t
4210 * @old: pointer to s64 value to compare with
4211 * @new: s64 value to assign
4212 *
4213 * If (@v == @old), atomically updates @v to @new with full ordering.
4214 * Otherwise, updates @old to the current value of @v.
4215 *
4216 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
4217 *
4218 * Return: @true if the exchange occurred, @false otherwise.
4219 */
4220 static __always_inline bool
4221 raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4222 {
4223 #if defined(arch_atomic64_try_cmpxchg)
4224 return arch_atomic64_try_cmpxchg(v, old, new);
4225 #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4226 bool ret;
4227 __atomic_pre_full_fence();
4228 ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4229 __atomic_post_full_fence();
4230 return ret;
4231 #else
4232 s64 r, o = *old;
4233 r = raw_atomic64_cmpxchg(v, o, new);
4234 if (unlikely(r != o))
4235 *old = r;
4236 return likely(r == o);
4237 #endif
4238 }
4239
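/*
 * An illustrative sketch, not part of the generated API above: the
 * canonical raw_atomic64_try_cmpxchg() retry loop. On failure, @c is
 * updated to the current value of @v, so the bound is re-checked
 * without an extra read. The helper name and its limit semantics are
 * hypothetical.
 */
static __always_inline bool
example_atomic64_add_below(atomic64_t *v, s64 a, s64 limit)
{
	s64 c = raw_atomic64_read(v);

	do {
		if (c + a > limit)
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return true;
}
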
4240 /**
4241 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4242 * @v: pointer to atomic64_t
4243 * @old: pointer to s64 value to compare with
4244 * @new: s64 value to assign
4245 *
4246 * If (@v == @old), atomically updates @v to @new with acquire ordering.
4247 * Otherwise, updates @old to the current value of @v.
4248 *
4249 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
4250 *
4251 * Return: @true if the exchange occurred, @false otherwise.
4252 */
4253 static __always_inline bool
4254 raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4255 {
4256 #if defined(arch_atomic64_try_cmpxchg_acquire)
4257 return arch_atomic64_try_cmpxchg_acquire(v, old, new);
4258 #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4259 bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4260 __atomic_acquire_fence();
4261 return ret;
4262 #elif defined(arch_atomic64_try_cmpxchg)
4263 return arch_atomic64_try_cmpxchg(v, old, new);
4264 #else
4265 s64 r, o = *old;
4266 r = raw_atomic64_cmpxchg_acquire(v, o, new);
4267 if (unlikely(r != o))
4268 *old = r;
4269 return likely(r == o);
4270 #endif
4271 }
4272
4273 /**
4274 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
4275 * @v: pointer to atomic64_t
4276 * @old: pointer to s64 value to compare with
4277 * @new: s64 value to assign
4278 *
4279 * If (@v == @old), atomically updates @v to @new with release ordering.
4280 * Otherwise, updates @old to the current value of @v.
4281 *
4282 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
4283 *
4284 * Return: @true if the exchange occurred, @false otherwise.
4285 */
4286 static __always_inline bool
4287 raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4288 {
4289 #if defined(arch_atomic64_try_cmpxchg_release)
4290 return arch_atomic64_try_cmpxchg_release(v, old, new);
4291 #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4292 __atomic_release_fence();
4293 return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4294 #elif defined(arch_atomic64_try_cmpxchg)
4295 return arch_atomic64_try_cmpxchg(v, old, new);
4296 #else
4297 s64 r, o = *old;
4298 r = raw_atomic64_cmpxchg_release(v, o, new);
4299 if (unlikely(r != o))
4300 *old = r;
4301 return likely(r == o);
4302 #endif
4303 }
4304
4305 /**
4306 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4307 * @v: pointer to atomic64_t
4308 * @old: pointer to s64 value to compare with
4309 * @new: s64 value to assign
4310 *
4311 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
4312 * Otherwise, updates @old to the current value of @v.
4313 *
4314 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
4315 *
4316 * Return: @true if the exchange occurred, @false otherwise.
4317 */
4318 static __always_inline bool
4319 raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
4320 {
4321 #if defined(arch_atomic64_try_cmpxchg_relaxed)
4322 return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4323 #elif defined(arch_atomic64_try_cmpxchg)
4324 return arch_atomic64_try_cmpxchg(v, old, new);
4325 #else
4326 s64 r, o = *old;
4327 r = raw_atomic64_cmpxchg_relaxed(v, o, new);
4328 if (unlikely(r != o))
4329 *old = r;
4330 return likely(r == o);
4331 #endif
4332 }
4333
4334 /**
4335 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
4336 * @i: s64 value to subtract
4337 * @v: pointer to atomic64_t
4338 *
4339 * Atomically updates @v to (@v - @i) with full ordering.
4340 *
4341 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
4342 *
4343 * Return: @true if the resulting value of @v is zero, @false otherwise.
4344 */
4345 static __always_inline bool
4346 raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
4347 {
4348 #if defined(arch_atomic64_sub_and_test)
4349 return arch_atomic64_sub_and_test(i, v);
4350 #else
4351 return raw_atomic64_sub_return(i, v) == 0;
4352 #endif
4353 }
4354
4355 /**
4356 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
4357 * @v: pointer to atomic64_t
4358 *
4359 * Atomically updates @v to (@v - 1) with full ordering.
4360 *
4361 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
4362 *
4363 * Return: @true if the resulting value of @v is zero, @false otherwise.
4364 */
4365 static __always_inline bool
4366 raw_atomic64_dec_and_test(atomic64_t *v)
4367 {
4368 #if defined(arch_atomic64_dec_and_test)
4369 return arch_atomic64_dec_and_test(v);
4370 #else
4371 return raw_atomic64_dec_return(v) == 0;
4372 #endif
4373 }
4374
4375 /**
4376 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
4377 * @v: pointer to atomic64_t
4378 *
4379 * Atomically updates @v to (@v + 1) with full ordering.
4380 *
4381 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
4382 *
4383 * Return: @true if the resulting value of @v is zero, @false otherwise.
4384 */
4385 static __always_inline bool
4386 raw_atomic64_inc_and_test(atomic64_t *v)
4387 {
4388 #if defined(arch_atomic64_inc_and_test)
4389 return arch_atomic64_inc_and_test(v);
4390 #else
4391 return raw_atomic64_inc_return(v) == 0;
4392 #endif
4393 }
4394
4395 /**
4396 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
4397 * @i: s64 value to add
4398 * @v: pointer to atomic64_t
4399 *
4400 * Atomically updates @v to (@v + @i) with full ordering.
4401 *
4402 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
4403 *
4404 * Return: @true if the resulting value of @v is negative, @false otherwise.
4405 */
4406 static __always_inline bool
4407 raw_atomic64_add_negative(s64 i, atomic64_t *v)
4408 {
4409 #if defined(arch_atomic64_add_negative)
4410 return arch_atomic64_add_negative(i, v);
4411 #elif defined(arch_atomic64_add_negative_relaxed)
4412 bool ret;
4413 __atomic_pre_full_fence();
4414 ret = arch_atomic64_add_negative_relaxed(i, v);
4415 __atomic_post_full_fence();
4416 return ret;
4417 #else
4418 return raw_atomic64_add_return(i, v) < 0;
4419 #endif
4420 }
4421
4422 /**
4423 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
4424 * @i: s64 value to add
4425 * @v: pointer to atomic64_t
4426 *
4427 * Atomically updates @v to (@v + @i) with acquire ordering.
4428 *
4429 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
4430 *
4431 * Return: @true if the resulting value of @v is negative, @false otherwise.
4432 */
4433 static __always_inline bool
4434 raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
4435 {
4436 #if defined(arch_atomic64_add_negative_acquire)
4437 return arch_atomic64_add_negative_acquire(i, v);
4438 #elif defined(arch_atomic64_add_negative_relaxed)
4439 bool ret = arch_atomic64_add_negative_relaxed(i, v);
4440 __atomic_acquire_fence();
4441 return ret;
4442 #elif defined(arch_atomic64_add_negative)
4443 return arch_atomic64_add_negative(i, v);
4444 #else
4445 return raw_atomic64_add_return_acquire(i, v) < 0;
4446 #endif
4447 }
4448
4449 /**
4450 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
4451 * @i: s64 value to add
4452 * @v: pointer to atomic64_t
4453 *
4454 * Atomically updates @v to (@v + @i) with release ordering.
4455 *
4456 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
4457 *
4458 * Return: @true if the resulting value of @v is negative, @false otherwise.
4459 */
4460 static __always_inline bool
4461 raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
4462 {
4463 #if defined(arch_atomic64_add_negative_release)
4464 return arch_atomic64_add_negative_release(i, v);
4465 #elif defined(arch_atomic64_add_negative_relaxed)
4466 __atomic_release_fence();
4467 return arch_atomic64_add_negative_relaxed(i, v);
4468 #elif defined(arch_atomic64_add_negative)
4469 return arch_atomic64_add_negative(i, v);
4470 #else
4471 return raw_atomic64_add_return_release(i, v) < 0;
4472 #endif
4473 }
4474
4475 /**
4476 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
4477 * @i: s64 value to add
4478 * @v: pointer to atomic64_t
4479 *
4480 * Atomically updates @v to (@v + @i) with relaxed ordering.
4481 *
4482 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
4483 *
4484 * Return: @true if the resulting value of @v is negative, @false otherwise.
4485 */
4486 static __always_inline bool
4487 raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
4488 {
4489 #if defined(arch_atomic64_add_negative_relaxed)
4490 return arch_atomic64_add_negative_relaxed(i, v);
4491 #elif defined(arch_atomic64_add_negative)
4492 return arch_atomic64_add_negative(i, v);
4493 #else
4494 return raw_atomic64_add_return_relaxed(i, v) < 0;
4495 #endif
4496 }
4497
4498 /**
4499 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
4500 * @v: pointer to atomic64_t
4501 * @a: s64 value to add
4502 * @u: s64 value to compare with
4503 *
4504 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
4505 *
4506 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
4507 *
4508 * Return: The original value of @v.
4509 */
4510 static __always_inline s64
4511 raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
4512 {
4513 #if defined(arch_atomic64_fetch_add_unless)
4514 return arch_atomic64_fetch_add_unless(v, a, u);
4515 #else
4516 s64 c = raw_atomic64_read(v);
4517
4518 do {
4519 if (unlikely(c == u))
4520 break;
4521 } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
4522
4523 return c;
4524 #endif
4525 }
4526
4527 /**
4528 * raw_atomic64_add_unless() - atomic add unless value with full ordering
4529 * @v: pointer to atomic64_t
4530 * @a: s64 value to add
4531 * @u: s64 value to compare with
4532 *
4533 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
4534 *
4535 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
4536 *
4537 * Return: @true if @v was updated, @false otherwise.
4538 */
4539 static __always_inline bool
4540 raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
4541 {
4542 #if defined(arch_atomic64_add_unless)
4543 return arch_atomic64_add_unless(v, a, u);
4544 #else
4545 return raw_atomic64_fetch_add_unless(v, a, u) != u;
4546 #endif
4547 }
4548
4549 /**
4550 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
4551 * @v: pointer to atomic64_t
4552 *
4553 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
4554 *
4555 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
4556 *
4557 * Return: @true if @v was updated, @false otherwise.
4558 */
4559 static __always_inline bool
4560 raw_atomic64_inc_not_zero(atomic64_t *v)
4561 {
4562 #if defined(arch_atomic64_inc_not_zero)
4563 return arch_atomic64_inc_not_zero(v);
4564 #else
4565 return raw_atomic64_add_unless(v, 1, 0);
4566 #endif
4567 }
4568
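/*
 * An illustrative sketch, not part of the generated API above: pinning
 * an object only while its count is non-zero, the usual lookup-side
 * counterpart to a final-put check. The helper name is hypothetical.
 */
static __always_inline bool
example_atomic64_tryget(atomic64_t *refs)
{
	/* Fails, leaving @refs untouched, once the count has reached zero */
	return raw_atomic64_inc_not_zero(refs);
}
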
4569 /**
4570 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
4571 * @v: pointer to atomic64_t
4572 *
4573 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
4574 *
4575 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
4576 *
4577 * Return: @true if @v was updated, @false otherwise.
4578 */
4579 static __always_inline bool
4580 raw_atomic64_inc_unless_negative(atomic64_t *v)
4581 {
4582 #if defined(arch_atomic64_inc_unless_negative)
4583 return arch_atomic64_inc_unless_negative(v);
4584 #else
4585 s64 c = raw_atomic64_read(v);
4586
4587 do {
4588 if (unlikely(c < 0))
4589 return false;
4590 } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
4591
4592 return true;
4593 #endif
4594 }
4595
4596 /**
4597 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
4598 * @v: pointer to atomic64_t
4599 *
4600 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
4601 *
4602 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
4603 *
4604 * Return: @true if @v was updated, @false otherwise.
4605 */
4606 static __always_inline bool
4607 raw_atomic64_dec_unless_positive(atomic64_t *v)
4608 {
4609 #if defined(arch_atomic64_dec_unless_positive)
4610 return arch_atomic64_dec_unless_positive(v);
4611 #else
4612 s64 c = raw_atomic64_read(v);
4613
4614 do {
4615 if (unlikely(c > 0))
4616 return false;
4617 } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
4618
4619 return true;
4620 #endif
4621 }
4622
4623 /**
4624 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
4625 * @v: pointer to atomic64_t
4626 *
4627 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
4628 *
4629 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
4630 *
4631 * Return: The old value of (@v - 1), regardless of whether @v was updated.
4632 */
4633 static __always_inline s64
4634 raw_atomic64_dec_if_positive(atomic64_t *v)
4635 {
4636 #if defined(arch_atomic64_dec_if_positive)
4637 return arch_atomic64_dec_if_positive(v);
4638 #else
4639 s64 dec, c = raw_atomic64_read(v);
4640
4641 do {
4642 dec = c - 1;
4643 if (unlikely(dec < 0))
4644 break;
4645 } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
4646
4647 return dec;
4648 #endif
4649 }
4650
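/*
 * An illustrative sketch, not part of the generated API above: using
 * raw_atomic64_dec_if_positive() as a counting "try acquire". The
 * helper name is hypothetical.
 */
static __always_inline bool
example_atomic64_try_acquire_unit(atomic64_t *available)
{
	/* A negative result means nothing was available and @available was untouched */
	return raw_atomic64_dec_if_positive(available) >= 0;
}
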
4651 #endif /* _LINUX_ATOMIC_FALLBACK_H */
4652 // f8888b25626bea006e7f11f7add7cecc33d0fa2e
4653