/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

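/*
 * Select the LL/SC and AM* instruction mnemonics that match the native
 * long size (32-bit vs 64-bit).  The *_db forms are the variants of the
 * LoongArch atomic memory-access instructions that also act as a memory
 * barrier.
 */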
#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w "
#define __SC		"sc.w "
#define __AMADD		"amadd.w "
#define __AMAND_DB	"amand_db.w "
#define __AMOR_DB	"amor_db.w "
#define __AMXOR_DB	"amxor_db.w "
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d "
#define __SC		"sc.d "
#define __AMADD		"amadd.d "
#define __AMAND_DB	"amand_db.d "
#define __AMOR_DB	"amor_db.d "
#define __AMXOR_DB	"amxor_db.d "
#endif

#define ATOMIC_INIT(i)	  { (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

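/*
 * ATOMIC_OP() generates arch_atomic_<op>(): a single am<op>_db.w with
 * $zero as the destination register performs the read-modify-write and
 * discards the old value, so no LL/SC retry loop is needed.
 */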
#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

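/*
 * ATOMIC_OP_RETURN() generates arch_atomic_<op>_return_relaxed(): the AM
 * instruction returns the old value in "result", and the new value is
 * recomputed in C as "result c_op I".
 */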
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0	\n"				\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

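/*
 * ATOMIC_FETCH_OP() generates arch_atomic_fetch_<op>_relaxed(), which
 * returns the value the counter held before the operation.
 */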
#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0	\n"				\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
	ATOMIC_FETCH_OP(op, I, asm_op)

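/*
 * There is no amsub instruction, so subtraction reuses amadd with the
 * operand negated.  The two lines below expand to, e.g.:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v);
 *	static inline int arch_atomic_sub_return_relaxed(int i, atomic_t *v);
 *	static inline int arch_atomic_fetch_add_relaxed(int i, atomic_t *v);
 */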
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

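/*
 * Advertise the _relaxed implementations above; the generic atomic code
 * in <linux/atomic.h> builds the acquire/release/fully-ordered variants
 * from them.
 */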
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

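/*
 * The bitwise operations have no value-returning form in the atomic API,
 * so ATOMIC_OPS is redefined without ATOMIC_OP_RETURN for and/or/xor.
 */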
#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

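/**
 * arch_atomic_fetch_add_unless - add to an atomic_t unless it has a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v
 * @u: the value @v must not hold for the add to happen
 *
 * Atomically adds @a to @v, unless @v already equals @u, and returns the
 * original value of @v.  Roughly (sketch only; the LL/SC loop below does
 * this atomically and retries on contention):
 *
 *	int old = arch_atomic_read(v);
 *	if (old != u)
 *		arch_atomic_set(v, old + a);
 *	return old;
 */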
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	ll.w	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.w	%[rc], %[p], %[a]\n"
		"	sc.w	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

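/**
 * arch_atomic_sub_if_positive - conditionally subtract from an atomic_t
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v only if the result is not negative, and
 * returns the old value of @v minus @i in either case (a negative return
 * value means @v was left unchanged).  When @i is a compile-time constant
 * the immediate form addi.w is used, otherwise the register form sub.w.
 */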
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
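/*
 * Example (sketch, with a hypothetical "tokens" counter): consume one
 * token only if one is available, never letting the count go negative:
 *
 *	if (arch_atomic_dec_if_positive(&tokens) < 0)
 *		return -EAGAIN;
 *
 * A negative return value means the counter was already at (or below)
 * zero and was left unchanged.  Kernel code normally reaches this through
 * the atomic_dec_if_positive() wrapper from <linux/atomic.h>.
 */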

#ifdef CONFIG_64BIT

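/*
 * The atomic64_t implementation below mirrors the 32-bit one above,
 * using the doubleword (.d) forms of the same instructions.
 */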
#define ATOMIC64_INIT(i)    { (i) }

#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
{									\
	long result;							\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op)				\
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)	\
{									\
	long result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

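/*
 * 64-bit counterpart of arch_atomic_fetch_add_unless(); see the comment
 * above the 32-bit version for the semantics.
 */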
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	ll.d	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.d	%[rc], %[p], %[a]\n"
		"	sc.d	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c] "=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

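/*
 * 64-bit counterpart of arch_atomic_sub_if_positive(); see the comment
 * above the 32-bit version for the semantics.
 */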
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	addi.d	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	sub.d	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */