/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__

#include <asm/bitops.h>

/* The sync_bitops functions are equivalent to the SMP implementations of
 * the original functions, regardless of whether CONFIG_SMP is defined.
 *
 * We need them because _set_bit() and friends are not SMP safe if
 * !CONFIG_SMP. But under Xen you might be communicating with a completely
 * external entity that may be on another CPU (e.g. two uniprocessor guests
 * communicating via event channels and grant tables). So we need a variant
 * of the bit ops that is SMP safe even on a UP kernel.
 */

/*
 * Unordered
 */

#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)
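
/*
 * Minimal usage sketch (illustrative only, not part of this header; the
 * shared_info bitmap and the bit number are assumptions). These unordered
 * forms are atomic read-modify-writes, so a peer in another domain never
 * observes a torn update, but they impose no ordering on surrounding
 * memory accesses:
 *
 *	static void mark_ready(volatile unsigned long *shared_info)
 *	{
 *		sync_set_bit(3, shared_info);
 *	}
 *
 *	static int peer_ready(volatile unsigned long *shared_info)
 *	{
 *		return sync_test_bit(3, shared_info);
 *	}
 */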

/*
 * Fully ordered
 */

int _sync_test_and_set_bit(int nr, volatile unsigned long *p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long *p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long *p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
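
/*
 * Sketch of the classic consume-an-event pattern (illustrative only; the
 * pending bitmap and handle_event() helper are assumptions). Because the
 * read-modify-write is fully ordered, the handler's accesses cannot be
 * reordered before the claim, and exactly one caller wins each set bit:
 *
 *	static void poll_events(volatile unsigned long *pending)
 *	{
 *		if (sync_test_and_clear_bit(0, pending))
 *			handle_event();
 *	}
 */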

#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})
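
/*
 * Sketch of arch_sync_cmpxchg() usage (illustrative only; the owner word
 * and token values are assumptions). The explicit barriers bracketing the
 * relaxed cmpxchg make the whole operation fully ordered, matching the
 * test_and_* helpers above:
 *
 *	static int try_claim(unsigned int *owner, unsigned int me)
 *	{
 *		return arch_sync_cmpxchg(owner, 0u, me) == 0u;
 *	}
 */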

#endif