/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/barrier.h>

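/*
 * Exchange a 1-, 2-, 4- or 8-byte quantity: atomically replace the value at
 * *ptr with x and return the value that was there before.  The
 * load-exclusive/store-release-exclusive loop retries until the store
 * succeeds, and the trailing smp_mb() gives xchg() its fully ordered
 * semantics.
 */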
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("//	__xchg1\n"
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 2:
		asm volatile("//	__xchg2\n"
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 4:
		asm volatile("//	__xchg4\n"
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 8:
		asm volatile("//	__xchg8\n"
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "memory");
		break;
	default:
		BUILD_BUG();
	}

	smp_mb();
	return ret;
}

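/*
 * Type-generic wrapper: the operand size is taken from *ptr and the result
 * is cast back to that type.  Illustrative use (the variable is
 * hypothetical, not part of this header):
 *
 *	old_flags = xchg(&st->flags, 0);
 */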
#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

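/*
 * Compare-and-exchange with no implied barriers: if the value at *ptr equals
 * old, store new; in either case return the value that was read.  The
 * exclusive sequence is only retried when the store-exclusive fails due to
 * contention, not when the comparison fails.
 */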
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}

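/*
 * LDXP/STXP are always available on arm64, so double-word cmpxchg is
 * unconditionally supported.  __cmpxchg_double() operates on two adjacent
 * 64-bit words (ptr2 must immediately follow ptr1, as the VM_BUG_ON checks)
 * and returns 1 if both old values matched and the new values were stored,
 * 0 otherwise.  No barriers are implied.
 */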
#define system_has_cmpxchg_double()     1

static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
		unsigned long old1, unsigned long old2,
		unsigned long new1, unsigned long new2, int size)
{
	unsigned long loop, lost;

	switch (size) {
	case 8:
		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
		do {
			asm volatile("// __cmpxchg_double8\n"
			"	ldxp	%0, %1, %2\n"
			"	eor	%0, %0, %3\n"
			"	eor	%1, %1, %4\n"
			"	orr	%1, %0, %1\n"
			"	mov	%w0, #0\n"
			"	cbnz	%1, 1f\n"
			"	stxp	%w0, %5, %6, %2\n"
			"1:\n"
				: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
				: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
		} while (loop);
		break;
	default:
		BUILD_BUG();
	}

	return !lost;
}

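/*
 * Fully ordered variant of __cmpxchg_double(): smp_mb() on either side of
 * the unordered primitive.
 */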
static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
			unsigned long old1, unsigned long old2,
			unsigned long new1, unsigned long new2, int size)
{
	int ret;

	smp_mb();
	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
	smp_mb();

	return ret;
}

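/*
 * Fully ordered variant of __cmpxchg(), backing the cmpxchg() macro below.
 */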
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

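/*
 * cmpxchg() is fully ordered; cmpxchg_local() implies no memory barriers.
 * Both return the value found at *ptr, so the caller compares it against
 * the expected old value to detect success.  Illustrative use (the lock
 * structure is hypothetical, not part of this header):
 *
 *	if (cmpxchg(&lock->owner, NULL, current) == NULL)
 *		... lock acquired ...
 */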
#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

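/*
 * cmpxchg_double() atomically compares and swaps two adjacent words,
 * returning 1 on success and 0 on failure; cmpxchg_double_local() is the
 * same operation without the full barriers.
 */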
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

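/*
 * this_cpu_cmpxchg_*() helpers: preemption is disabled so that
 * raw_cpu_ptr() keeps referring to the same CPU's copy of the variable for
 * the duration of the unordered cmpxchg_local().
 */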
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

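/*
 * Double-word flavour of the above: both adjacent 64-bit per-cpu words are
 * updated atomically, with 1 returned on success.
 */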
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

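/*
 * unsigned long is 64-bit on arm64, so the cmpxchg64*() variants map
 * directly onto the generic macros; the relaxed form implies no ordering
 * and therefore shares the cmpxchg_local() implementation.
 */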
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */