/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

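/*
 * ATOMIC_OP generates the void (non-returning) atomic operations using the
 * LSE ST<op> instructions, which update memory without returning the old
 * value.
 */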
#define ATOMIC_OP(op, asm_op)						\
static inline void __lse_atomic_##op(int i, atomic_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

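/*
 * ATOMIC_FETCH_OP generates the fetch_<op> variants using the LSE LD<op>
 * instructions, which return the old value. The 'a' and 'l' suffixes select
 * acquire/release ordering; the fully ordered variant uses both ('al') and,
 * like the acquire/release forms, clobbers "memory".
 */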
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	int old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

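/*
 * There is no LSE subtract instruction, so the sub variants are implemented
 * by adding the negated operand.
 */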
#define ATOMIC_FETCH_OP_SUB(name)					\
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

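/*
 * The *_return variants use LDADD to fetch the old value and then add the
 * operand in a separate instruction to produce the new value in a register.
 */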
#define ATOMIC_OP_ADD_SUB_RETURN(name, mb, cl...)			\
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}									\
									\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_add_return##name(-i, v);			\
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed,   )
ATOMIC_OP_ADD_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_SUB_RETURN

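/*
 * LSE has no plain AND: STCLR/LDCLR clear the bits that are set in the
 * operand, so AND is implemented as ANDNOT of the inverted mask.
 */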
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

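/*
 * The 64-bit atomics below mirror the 32-bit versions above, operating on
 * the full X registers instead of the W views.
 */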
#define ATOMIC64_OP(op, asm_op)						\
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{									\
	s64 old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name, mb, cl...)			\
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}									\
									\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{									\
	return __lse_atomic64_add_return##name(-i, v);			\
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_SUB_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

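/*
 * Decrement the counter only if the result does not go negative. The old
 * value minus one is returned in either case. Implemented as a CASAL loop:
 * load the counter, bail out if the decrement would go negative, and retry
 * if another CPU changed the value between the load and the compare-and-swap.
 * Note that the 'v' pointer register is reused to hold the return value.
 */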
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

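/*
 * __CMPXCHG_CASE generates cmpxchg helpers for 8-, 16-, 32- and 64-bit
 * quantities using the LSE CAS instruction: 'w'/'x' selects the register
 * width, 'b'/'h' the byte/halfword forms, and 'a'/'l' the acquire/release
 * ordering. The value observed in memory is returned.
 */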
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

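/*
 * __CMPXCHG_DBL generates the double-word cmpxchg helpers using CASP, which
 * compares and swaps a pair of adjacent 64-bit words held in even/odd
 * register pairs. The EOR/ORR sequence folds the comparison of both returned
 * words into a single value, so zero is returned on success and non-zero on
 * failure.
 */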
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */