xref: /openbmc/linux/arch/arm64/include/asm/cmpxchg.h (revision e9a4b795)
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

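/*
 * __xchg() atomically swaps @x into the object at @ptr and returns the
 * value that was there before, with full ordering: the LL/SC sequence
 * pairs a store-release with a trailing dmb ish, while the LSE path uses
 * the swpal* instructions. The two sequences are selected at boot via the
 * ARM64_LSE_ATOMIC_INSN alternative.
 */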
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpalb	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 2:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpalh	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 4:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpal	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 8:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpal	%3, %0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

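/*
 * xchg() funnels every operand size through __xchg() as an unsigned long
 * and casts the result back to the pointee type. A usage sketch
 * (identifiers are illustrative only):
 *
 *	old_flags = xchg(&obj->flags, new_flags);
 */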
#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

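/*
 * __cmpxchg() dispatches on size to the __cmpxchg_case_<n>() helpers
 * supplied by the atomics backend (LL/SC or LSE). This variant makes no
 * ordering guarantees; it returns the value previously held at @ptr.
 */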
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_1(ptr, old, new);
	case 2:
		return __cmpxchg_case_2(ptr, old, new);
	case 4:
		return __cmpxchg_case_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

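/*
 * __cmpxchg_mb() is the fully ordered counterpart of __cmpxchg(),
 * dispatching to the __cmpxchg_case_mb_<n>() helpers instead.
 */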
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_mb_1(ptr, old, new);
	case 2:
		return __cmpxchg_case_mb_2(ptr, old, new);
	case 4:
		return __cmpxchg_case_mb_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_mb_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

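/*
 * cmpxchg() is fully ordered; cmpxchg_local() performs the same
 * compare-and-exchange without ordering guarantees, for data that is not
 * shared across CPUs. Both return the value previously held in *ptr, so a
 * caller detects success by comparing the result against the expected old
 * value.
 */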
#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

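/*
 * cmpxchg_double() operates on two adjacent, naturally aligned 64-bit
 * words. __cmpxchg_double_check() rejects non-64-bit operands at build
 * time and, under CONFIG_DEBUG_VM, verifies that ptr2 is the word
 * immediately after ptr1.
 */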
#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

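/*
 * The underlying __cmpxchg_double*() helpers return zero on success, so
 * the result is inverted here: cmpxchg_double() and cmpxchg_double_local()
 * return 1 when both words were updated and 0 otherwise.
 */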
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})

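/*
 * _protect_cmpxchg_local() backs the this_cpu_cmpxchg_<n>() family: it
 * disables preemption around a cmpxchg_local() on the current CPU's
 * instance of the per-cpu variable.
 */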
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

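/*
 * Likewise for the double-word per-cpu case: preemption is disabled so
 * that both raw_cpu_ptr() lookups and the cmpxchg_double_local() operate
 * on the same CPU's data.
 */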
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

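/*
 * unsigned long is 64 bits wide on arm64, so the 64-bit variants map
 * directly onto the generic ones; cmpxchg64_relaxed() reuses the
 * unordered cmpxchg_local() implementation.
 */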
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */