/* xref: /openbmc/linux/arch/x86/include/asm/cmpxchg.h (revision 4a44a19b) */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * memory accesses around it.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")

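/*
 * Illustrative sketch, not part of the original header: xchg() stores a
 * new value and returns the previous contents of *ptr in one atomic
 * step, which is enough for a trivial test-and-set.  The helper name
 * and its parameter are hypothetical.
 *
 *	static inline unsigned int example_test_and_set(unsigned int *lock_word)
 *	{
 *		return xchg(lock_word, 1);	// 0 => we took it, non-zero => already held
 *	}
 */
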
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

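/*
 * Illustrative sketch, not part of the original header: the typical
 * cmpxchg() retry loop.  Success is detected by comparing the returned
 * value with the expected old value, as described above; the helper
 * name is hypothetical.
 *
 *	static inline void example_atomic_or(unsigned long *p, unsigned long mask)
 *	{
 *		unsigned long old = *p, prev;
 *
 *		while ((prev = cmpxchg(p, old, old | mask)) != old)
 *			old = prev;
 *	}
 */
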
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")

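/*
 * Illustrative sketch, not part of the original header: xadd() as a
 * fetch-and-add, e.g. handing out strictly increasing ticket numbers.
 * The helper name and its parameter are hypothetical.
 *
 *	static inline unsigned int example_take_ticket(unsigned int *next_ticket)
 *	{
 *		return xadd(next_ticket, 1);	// value *before* the increment
 *	}
 */
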
#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_*() adds "inc" to "*ptr"; unlike xadd(), it evaluates to "inc"
 * rather than the previous value of "*ptr".
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")

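/*
 * Illustrative sketch, not part of the original header: add_smp() for a
 * counter that needs atomicity only when other CPUs can race with it.
 * The counter name is hypothetical.
 *
 *	add_smp(&example_stat_counter, 1);
 */
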
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

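/*
 * Illustrative sketch, not part of the original header: cmpxchg_double()
 * atomically replaces two adjacent long-sized words (e.g. a pointer plus
 * a generation tag) and returns true on success; the pair must be
 * aligned to 2 * sizeof(long).  The structure and helper names are
 * hypothetical.
 *
 *	struct example_tagged_ptr {
 *		void		*ptr;
 *		unsigned long	tag;
 *	} __aligned(2 * sizeof(long));
 *
 *	static inline bool example_update(struct example_tagged_ptr *t,
 *					  void *old_ptr, unsigned long old_tag,
 *					  void *new_ptr, unsigned long new_tag)
 *	{
 *		return cmpxchg_double(&t->ptr, &t->tag,
 *				      old_ptr, old_tag, new_ptr, new_tag);
 *	}
 */
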
#endif	/* ASM_X86_CMPXCHG_H */