/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

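/*
 * These helpers implement the atomic_t/atomic64_t operations using the
 * ARMv8.1 LSE ("Large System Extensions") atomic instructions (STADD,
 * LDADD, CAS, CASP, ...). The exclusive load/store based fallbacks live
 * in atomic_ll_sc.h; the wrappers in atomic.h pick between the two.
 */
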
#define ATOMIC_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic_##op(int i, atomic_t *v)					\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}
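
/*
 * For reference, ATOMIC_OP(add, stadd) above expands to roughly:
 *
 *	static __always_inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(
 *		__LSE_PREAMBLE
 *		"	stadd	%w[i], %[v]\n"
 *		: [v] "+Q" (v->counter)
 *		: [i] "r" (i));
 *	}
 *
 * LSE has no atomic subtract instruction, so __lse_atomic_sub() is
 * simply an add of the negated value.
 */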

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline int						\
__lse_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	int old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

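/*
 * The "mb" argument selects the ordering variant of the LSE instruction:
 * no suffix is relaxed, "a" is acquire (e.g. ldadda), "l" is release
 * (ldaddl) and "al" is acquire+release (ldaddal). The ordered variants
 * also take a "memory" clobber so the compiler does not move other
 * memory accesses across them. ATOMIC_FETCH_OPS(add, ldadd) below thus
 * generates, roughly:
 *
 *	__lse_atomic_fetch_add_relaxed()	// ldadd
 *	__lse_atomic_fetch_add_acquire()	// ldadda
 *	__lse_atomic_fetch_add_release()	// ldaddl
 *	__lse_atomic_fetch_add()		// ldaddal
 *
 * each returning the value of v->counter observed before the update.
 */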
#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_FETCH_OP_SUB(name)					\
static __always_inline int						\
__lse_atomic_fetch_sub##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

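/*
 * The *_return forms are built on the fetch_* forms above: the fetched
 * (old) value has the operation applied to it again locally, which
 * yields the new value without a second atomic access.
 */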
#define ATOMIC_OP_ADD_SUB_RETURN(name)					\
static __always_inline int						\
__lse_atomic_add_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(i, v) + i;			\
}									\
									\
static __always_inline int						\
__lse_atomic_sub_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_sub##name(i, v) - i;			\
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN

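/*
 * LSE provides an atomic bit-clear (ldclr/stclr) but no plain atomic
 * AND, so the and/fetch_and operations are implemented as andnot of the
 * complemented mask: v & i == v & ~(~i).
 */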
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline int						\
__lse_atomic_fetch_and##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

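/*
 * The atomic64_t helpers below mirror the 32-bit ones above, using the
 * full X registers (%[i] rather than %w[i]) and s64 operands.
 */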
#define ATOMIC64_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline long						\
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
static __always_inline long						\
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
static __always_inline long						\
__lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(i, v) + i;		\
}									\
									\
static __always_inline long						\
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline long						\
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

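/*
 * There is no LSE instruction for dec_if_positive, so it is built from
 * a CAS loop: load the counter, bail out to label 2 if decrementing it
 * would go negative, then try to install the decremented value with
 * casal. The two subs compute the difference between the value casal
 * observed and the value originally loaded; if they differ the CAS lost
 * a race and the loop retries. The result (the decremented value, which
 * is negative on the early exit) is returned in the register aliased to
 * "v".
 */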
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

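/*
 * __CMPXCHG_CASE() generates one cmpxchg helper per operand size and
 * ordering: "w"/"x" selects the register width used in the asm
 * operands, "sfx" the instruction's size suffix ("b", "h" or none),
 * "mb" the ordering ("a", "l", "al" or none, giving e.g. casalb for the
 * fully ordered byte case), and "name"/"sz" form the function name.
 * Each helper returns the value observed at ptr; the caller detects
 * success by comparing that value against "old".
 */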
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),			\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

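/*
 * The 128-bit cmpxchg_double is built on casp, which compares and swaps
 * a pair of adjacent 64-bit words. On return, x0/x1 hold the values
 * that were observed in memory; the eor/orr sequence folds them into a
 * single status, so the generated helpers return 0 if both words
 * matched (the exchange took place) and non-zero otherwise.
 */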
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */