xref: /openbmc/linux/arch/arm64/include/asm/cmpxchg.h (revision a8da474e)
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	nop\n"							\
	"	nop\n"							\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
	"	nop\n"							\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)			\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
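
/*
 * For illustration (derived from the instantiations above, using the
 * operand numbering of the asm template): __xchg_case_mb_4 is built from
 * the LL/SC sequence
 *
 *	prfm	pstl1strm, %2
 *	1: ldxr	%w0, %2
 *	stlxr	%w1, %w3, %2
 *	cbnz	%w1, 1b
 *	dmb	ish
 *
 * which ARM64_LSE_ATOMIC_INSN patches at runtime, on LSE-capable CPUs,
 * to a single
 *
 *	swpal	%w3, %w0, %2
 *
 * padded with nops so both alternatives occupy the same space.
 */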

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_1(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_2(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_4(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
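
/*
 * Usage sketch (hypothetical caller and helper, for illustration only):
 * xchg() atomically publishes a new value with full ordering and returns
 * the previous contents; the _relaxed/_acquire/_release variants weaken
 * the ordering as their names suggest.
 *
 *	static unsigned long example_pending;
 *
 *	static void example_drain(void)
 *	{
 *		unsigned long work = xchg(&example_pending, 0);
 *
 *		if (work)
 *			example_handle(work);
 *	}
 */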

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed
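
/*
 * Usage sketch (hypothetical caller, for illustration only): the usual
 * compare-and-swap retry loop built on cmpxchg(), which returns the value
 * found at the location (equal to 'old' on success).
 *
 *	static unsigned long example_max;
 *
 *	static void example_track_max(unsigned long val)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = READ_ONCE(example_max);
 *			if (val <= old)
 *				return;
 *		} while (cmpxchg(&example_max, old, val) != old);
 *	}
 */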

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})
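
/*
 * Usage sketch (hypothetical caller, for illustration only):
 * cmpxchg_double() updates two adjacent 64-bit words as one atomic unit
 * and returns 1 on success, 0 on failure; __cmpxchg_double_check()
 * enforces that the two pointers really are adjacent doublewords.
 *
 *	struct example_pair {
 *		unsigned long	head;
 *		unsigned long	gen;
 *	} __aligned(16);
 *
 *	static bool example_update(struct example_pair *p, unsigned long new_head)
 *	{
 *		unsigned long old_head = READ_ONCE(p->head);
 *		unsigned long old_gen = READ_ONCE(p->gen);
 *
 *		return cmpxchg_double(&p->head, &p->gen,
 *				      old_head, old_gen,
 *				      new_head, old_gen + 1);
 *	}
 */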

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})
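
/*
 * Usage sketch (hypothetical per-CPU variable, for illustration only):
 * the generic this_cpu_cmpxchg() resolves to the size-specific helpers
 * above; _protect_cmpxchg_local() disables preemption so the task cannot
 * migrate between evaluating raw_cpu_ptr() and completing the cmpxchg.
 *
 *	static DEFINE_PER_CPU(unsigned long, example_slot);
 *
 *	static bool example_claim_slot(unsigned long token)
 *	{
 *		return this_cpu_cmpxchg(example_slot, 0, token) == 0;
 *	}
 */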

#endif	/* __ASM_CMPXCHG_H */