/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

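/*
 * Atomically exchange the value at "ptr" with "x", returning the value
 * previously stored there. ARM64_LSE_ATOMIC_INSN patches in either the
 * LL/SC sequence or the single LSE SWP instruction at boot, depending on
 * CPU support; the nops pad the LSE alternative to the same length as
 * the LL/SC sequence.
 */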
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpalb	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 2:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpalh	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 4:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpal	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 8:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpal	%3, %0, %2\n"
		"	nop\n"
		"	nop")
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})
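
/*
 * Illustrative use of xchg() (the "flag" variable is hypothetical, not
 * part of this header): atomically set a flag and learn whether it was
 * already set,
 *
 *	unsigned long was_set = xchg(&flag, 1UL);
 *	if (!was_set)
 *		we are the first setter
 *
 * xchg() is fully ordered (dmb ish on the LL/SC path, SWPAL with
 * acquire-release semantics on the LSE path).
 */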
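
/*
 * Unordered compare-and-exchange (no barrier semantics); this backs
 * cmpxchg_local() below. The __cmpxchg_case_* helpers are provided by
 * the LL/SC and LSE atomic backends pulled in via the includes above.
 */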
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_1(ptr, (u8)old, new);
	case 2:
		return __cmpxchg_case_2(ptr, (u16)old, new);
	case 4:
		return __cmpxchg_case_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}
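
/*
 * Fully ordered compare-and-exchange; the _mb helpers include the
 * required barrier semantics. This backs cmpxchg() below.
 */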
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
	case 2:
		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
	case 4:
		return __cmpxchg_case_mb_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_mb_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})
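
/*
 * Illustrative use of cmpxchg() (the "count" variable is hypothetical):
 * a classic compare-and-swap retry loop,
 *
 *	unsigned long old;
 *	do {
 *		old = count;
 *	} while (cmpxchg(&count, old, old + 1) != old);
 *
 * cmpxchg() returns the value actually found at the address, so a
 * return value equal to "old" signals that the update took effect.
 */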

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

#define system_has_cmpxchg_double()     1
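
/*
 * cmpxchg_double() operates on two naturally aligned, adjacent 64-bit
 * words. The check below enforces that the first word is 8 bytes wide
 * and that ptr2 sits immediately after ptr1 in memory.
 */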
#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})
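
/*
 * Illustrative use of cmpxchg_double() (the struct is hypothetical):
 * atomically replace two adjacent 64-bit words,
 *
 *	struct { unsigned long a, b; } pair;
 *
 *	if (cmpxchg_double(&pair.a, &pair.b, old_a, old_b, new_a, new_b))
 *		both words were replaced in one atomic step
 *
 * The return value is non-zero on success, zero if either word did not
 * match its expected old value.
 */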
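
/*
 * Run cmpxchg_local() with preemption disabled so that the
 * this_cpu_cmpxchg_*() operations below cannot be migrated to another
 * CPU in the middle of the operation.
 */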
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})
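
/*
 * unsigned long is 64 bits wide on arm64, so the 64-bit variants map
 * directly onto the generic operations above; the relaxed form reuses
 * the unordered cmpxchg_local().
 */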
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */