xref: /openbmc/linux/arch/x86/include/asm/cmpxchg.h (revision 569820be)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

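/*
 * As an illustration (not something this header itself relies on): on a
 * 32-bit build,
 *
 *	u64 val;
 *	arch_xchg(&val, 0ULL);
 *
 * has sizeof(*(ptr)) == 8, which matches none of the sizes above, so the
 * size switches below fall through to their default case, reference
 * __xchg_wrong_size(), and the build fails instead of silently emitting
 * a non-atomic access.
 */
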
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

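/*
 * For a 4-byte operand, __xchg_op(ptr, val, xchg, "") expands to roughly
 *
 *	asm volatile("xchgl %0, %1" : "+r" (__ret), "+m" (*(ptr))
 *		     : : "memory", "cc");
 *
 * i.e. the size suffix and the register constraint on the value operand
 * are selected by the sizeof() switch above.
 */
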
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory state, we use
 * "asm volatile" and "memory" clobbers to prevent gcc from reordering
 * memory accesses around it.
 */
#define arch_xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")

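/*
 * Illustrative use (dev and NEW_STATE are hypothetical, not defined in
 * this header):
 *
 *	unsigned long prev;
 *
 *	prev = arch_xchg(&dev->state, NEW_STATE);
 *
 * The store to dev->state is atomic and fully ordered, and prev receives
 * the value that was there immediately before the exchange.
 */
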
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define arch_cmpxchg(ptr, old, new)					\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

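/*
 * Illustrative use (the lock word and id values are hypothetical):
 *
 *	unsigned int prev;
 *
 *	prev = arch_cmpxchg(&lock->owner, OWNER_NONE, my_id);
 *	if (prev == OWNER_NONE) {
 *		... success: the owner field was updated to my_id ...
 *	} else {
 *		... failed: prev holds the id that was already there ...
 *	}
 *
 * arch_cmpxchg_local() is the same operation without the LOCK prefix and
 * is therefore atomic only with respect to the local CPU (e.g. against
 * interrupts), not against other CPUs.
 */

/*
 * try_cmpxchg: like cmpxchg, but reports success as a bool and, on
 * failure, writes the value actually found in memory back through the
 * "old" pointer, which makes retry loops cheaper (see the example after
 * the definitions below).
 */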
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
({									\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
		asm volatile(lock "cmpxchgb %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "q" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
		asm volatile(lock "cmpxchgw %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
		asm volatile(lock "cmpxchgl %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
		asm volatile(lock "cmpxchgq %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);						\
})

#define __try_cmpxchg(ptr, pold, new, size)				\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define __try_cmpxchg_local(ptr, pold, new, size)			\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), "")

#define arch_try_cmpxchg(ptr, pold, new)				\
	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))

#define arch_try_cmpxchg_local(ptr, pold, new)				\
	__try_cmpxchg_local((ptr), (pold), (new), sizeof(*(ptr)))

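/*
 * Illustrative use, the canonical retry loop (counter is a hypothetical
 * unsigned long):
 *
 *	unsigned long old = READ_ONCE(counter), new;
 *
 *	do {
 *		new = old + 1;
 *	} while (!arch_try_cmpxchg(&counter, &old, new));
 *
 * On failure arch_try_cmpxchg() returns false and updates "old" with the
 * value it actually found, so each iteration needs no separate reload of
 * counter.
 */
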
/*
 * xadd() atomically adds "inc" to "*ptr" and returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online.
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)

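/*
 * Illustrative use (stats is hypothetical):
 *
 *	unsigned long prev = xadd(&stats->hits, 1);
 *
 * stats->hits is incremented atomically and prev receives its value from
 * just before the increment, a pattern used, for example, by ticket locks
 * to hand out ticket numbers.
 */
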
#endif	/* ASM_X86_CMPXCHG_H */