xref: /openbmc/linux/arch/ia64/include/uapi/asm/cmpxchg.h (revision 9702a046)
16f52b16cSGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2f84f1b9cSMark Rutland #ifndef _UAPI_ASM_IA64_CMPXCHG_H
3f84f1b9cSMark Rutland #define _UAPI_ASM_IA64_CMPXCHG_H
443e40f25SDavid Howells 
543e40f25SDavid Howells /*
643e40f25SDavid Howells  * Compare/Exchange, forked from asm/intrinsics.h
743e40f25SDavid Howells  * which was:
843e40f25SDavid Howells  *
943e40f25SDavid Howells  *	Copyright (C) 2002-2003 Hewlett-Packard Co
1043e40f25SDavid Howells  *	David Mosberger-Tang <davidm@hpl.hp.com>
1143e40f25SDavid Howells  */
1243e40f25SDavid Howells 
1343e40f25SDavid Howells #ifndef __ASSEMBLY__
1443e40f25SDavid Howells 
1543e40f25SDavid Howells #include <linux/types.h>
1643e40f25SDavid Howells /* include compiler specific intrinsics */
1743e40f25SDavid Howells #include <asm/ia64regs.h>
1843e40f25SDavid Howells #include <asm/gcc_intrin.h>
1943e40f25SDavid Howells 
/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer(void);

/*
 * __arch_xchg() - atomically exchange @x with *@ptr, yielding the old
 * value cast back to the pointed-to type.
 *
 * Dispatches on @size (1, 2, 4 or 8 bytes) to the matching ia64 xchg
 * intrinsic.  Any other size calls the undeclared function above, so a
 * bad pointer type becomes a link-time error instead of a silent bug.
 * The whole macro is a GNU C statement expression; its value is the
 * final cast on the last line.  The __force annotations tell sparse the
 * address-space-stripping casts are intentional.
 */
#define __arch_xchg(x, ptr, size)					\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	case 1:								\
		__xchg_result = ia64_xchg1((__u8 __force *)ptr, x);	\
		break;							\
									\
	case 2:								\
		__xchg_result = ia64_xchg2((__u16 __force *)ptr, x);	\
		break;							\
									\
	case 4:								\
		__xchg_result = ia64_xchg4((__u32 __force *)ptr, x);	\
		break;							\
									\
	case 8:								\
		__xchg_result = ia64_xchg8((__u64 __force *)ptr, x);	\
		break;							\
	default:							\
		ia64_xchg_called_with_bad_pointer();			\
	}								\
	(__typeof__ (*(ptr)) __force) __xchg_result;			\
})
5143e40f25SDavid Howells 
#ifndef __KERNEL__
/*
 * Userspace-visible xchg(): exchange *ptr with x and return the
 * previous value, cast back to the pointed-to type.  The per-size
 * dispatch is performed by __arch_xchg().
 */
#define xchg(ptr, x)							\
({									\
	(__typeof__(*(ptr)))						\
	__arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));	\
})
#endif
5643e40f25SDavid Howells 
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer(void);

/*
 * ia64_cmpxchg() - size-dispatched compare-and-exchange.
 *
 * @sem:  ordering-semantic token, pasted into the intrinsic name:
 *        acq -> ia64_cmpxchgN_acq, rel -> ia64_cmpxchgN_rel.
 * @ptr:  location to operate on.
 * @old:  value to compare *@ptr against.
 * @new:  value to store on a successful compare.
 * @size: operand width in bytes (1, 2, 4 or 8).
 *
 * The first switch narrows @old to the operand width before widening it
 * into the 64-bit _o_, so the comparison value is zero-extended rather
 * than sign-extended; its default case is a deliberate no-op because the
 * second switch's default already turns an unsupported @size into a
 * link-time error via the undeclared function above.  The statement
 * expression evaluates to the old memory value, cast to @old's type.
 */
#define ia64_cmpxchg(sem, ptr, old, new, size)				\
({									\
	__u64 _o_, _r_;							\
									\
	switch (size) {							\
	case 1:								\
		_o_ = (__u8) (long __force) (old);			\
		break;							\
	case 2:								\
		_o_ = (__u16) (long __force) (old);			\
		break;							\
	case 4:								\
		_o_ = (__u32) (long __force) (old);			\
		break;							\
	case 8:								\
		_o_ = (__u64) (long __force) (old);			\
		break;							\
	default:							\
		break;							\
	}								\
	switch (size) {							\
	case 1:								\
		_r_ = ia64_cmpxchg1_##sem((__u8 __force *) ptr, new, _o_);	\
		break;							\
									\
	case 2:								\
		_r_ = ia64_cmpxchg2_##sem((__u16 __force *) ptr, new, _o_);	\
		break;							\
									\
	case 4:								\
		_r_ = ia64_cmpxchg4_##sem((__u32 __force *) ptr, new, _o_);	\
		break;							\
									\
	case 8:								\
		_r_ = ia64_cmpxchg8_##sem((__u64 __force *) ptr, new, _o_);	\
		break;							\
									\
	default:							\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
		break;							\
	}								\
	(__typeof__(old) __force) _r_;					\
})
11243e40f25SDavid Howells 
/*
 * cmpxchg with an explicit ia64 completion semantic: .acq (acquire)
 * or .rel (release).  Each expands to ia64_cmpxchg() with the matching
 * semantic token and the operand size taken from the pointed-to type.
 */
#define cmpxchg_acq(ptr, o, n)	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n)	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
11743e40f25SDavid Howells 
/*
 * Note on ordering semantics: early ia64 implementations ignored the
 * .acq/.rel completion hints and performed a full fence every time.
 * As a result, a lot of code in the wild used .acq where .rel was
 * really meant and happened to work.  When a CPU later honoured
 * .acq/.rel strictly, that code started breaking, so implementations
 * had to back-pedal and keep the "legacy" full-fence behaviour :-(
 */
126e4f9bfb3SPeter Zijlstra 
#ifndef __KERNEL__
/* for compatibility with other platforms: */
/* Plain cmpxchg() maps to the acquire form. */
#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
/* Same macro: ia64_cmpxchg()'s sizeof dispatch already covers 8-byte operands. */
#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

/* No cheaper CPU-local variant is provided; the _local forms alias the full ops. */
#define cmpxchg_local		cmpxchg
#define cmpxchg64_local		cmpxchg64
#endif
13543e40f25SDavid Howells 
13643e40f25SDavid Howells #endif /* !__ASSEMBLY__ */
13743e40f25SDavid Howells 
138f84f1b9cSMark Rutland #endif /* _UAPI_ASM_IA64_CMPXCHG_H */
139