xref: /openbmc/linux/arch/arm/include/asm/sync_bitops.h (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2e54d2f61SStefano Stabellini #ifndef __ASM_SYNC_BITOPS_H__
3e54d2f61SStefano Stabellini #define __ASM_SYNC_BITOPS_H__
4e54d2f61SStefano Stabellini 
5e54d2f61SStefano Stabellini #include <asm/bitops.h>
6e54d2f61SStefano Stabellini 
7e54d2f61SStefano Stabellini /* sync_bitops functions are equivalent to the SMP implementation of the
8e54d2f61SStefano Stabellini  * original functions, independently from CONFIG_SMP being defined.
9e54d2f61SStefano Stabellini  *
10e54d2f61SStefano Stabellini  * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
11e54d2f61SStefano Stabellini  * under Xen you might be communicating with a completely external entity
12e54d2f61SStefano Stabellini  * who might be on another CPU (e.g. two uniprocessor guests communicating
13e54d2f61SStefano Stabellini  * via event channels and grant tables). So we need a variant of the bit
14e54d2f61SStefano Stabellini  * ops which are SMP safe even on a UP kernel.
15e54d2f61SStefano Stabellini  */
16e54d2f61SStefano Stabellini 
/*
 * Unordered
 */

/*
 * The unordered sync variants are plain aliases for the regular ARM
 * bitops.  NOTE(review): this relies on _set_bit/_clear_bit/_change_bit
 * being implemented with the same SMP-safe atomics whether or not
 * CONFIG_SMP is set (see the header comment above) -- confirm against
 * the ARM bitops implementation.
 */
#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)
25e54d2f61SStefano Stabellini 
/*
 * Fully ordered
 */

/*
 * The test-and-modify sync ops must be fully ordered, so they are
 * declared here and provided as out-of-line functions (defined
 * elsewhere in the arch code) rather than aliased to the regular
 * bitops.  Each returns the previous value of the bit, as an int.
 *
 * Style fix: '* p' -> '*p' per kernel coding style (the pointer
 * declarator binds to the parameter name); no functional change.
 */
int _sync_test_and_set_bit(int nr, volatile unsigned long *p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long *p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long *p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
38*dda5f312SMark Rutland 
/*
 * Fully-ordered cmpxchg: bracket the relaxed arch_cmpxchg with memory
 * barriers so the operation is ordered even on a UP kernel, per the
 * header comment above (sync ops must behave like the SMP versions
 * independently of CONFIG_SMP).  Returns the value previously at *ptr.
 */
#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	/* relaxed exchange; the barriers supply the ordering */	\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})
47e54d2f61SStefano Stabellini 
48e54d2f61SStefano Stabellini #endif
49