/* /openbmc/linux/arch/arc/include/asm/atomic.h (revision 842ed298) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

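/*
 * A rough C-level sketch of the LLOCK/SCOND retry idiom the macro above
 * emits (illustrative only; LLOCK()/SCOND() stand in for the instructions):
 *
 *	do {
 *		val = LLOCK(&v->counter);	// load-locked
 *		val = val <asm_op> i;		// add/sub/and/...
 *	} while (!SCOND(val, &v->counter));	// store-conditional; fails and
 *						// retries if the location was
 *						// written in between
 */
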
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
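/*
 * Summary of the three LLSC flavours generated above (hypothetical caller
 * shown for illustration only):
 *  - atomic_op(i, v)         : no return value, no ordering implied
 *  - atomic_op_return(i, v)  : returns the NEW value, full barriers around
 *  - atomic_fetch_op(i, v)   : returns the OLD value, full barriers around
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	atomic_add(2, &cnt);			// cnt == 2
 *	int new = atomic_add_return(3, &cnt);	// new == 5
 *	int old = atomic_fetch_add(1, &cnt);	// old == 5, cnt == 6
 */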

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking here is irq-disabling only (UP) or a spinlock (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}
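/*
 * Without LLSC the same three flavours are emulated under atomic_ops_lock()
 * (irq-disable on UP, a shared spinlock on SMP; see asm/smp.h). A rough
 * sketch of what e.g. atomic_fetch_add() reduces to here:
 *
 *	atomic_ops_lock(flags);		// lock acquire/release also give the
 *	orig = v->counter;		// smp_mb() semantics the API requires
 *	v->counter += i;
 *	atomic_ops_unlock(flags);
 *	return orig;
 */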

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
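/*
 * The two invocations above provide, for whichever backend was selected:
 * atomic_add(), atomic_add_return(), atomic_fetch_add() and
 * atomic_sub(), atomic_sub_return(), atomic_fetch_sub().
 */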

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
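/*
 * The bitwise group reuses ATOMIC_OPS as redefined without ATOMIC_OP_RETURN,
 * so it generates only the void and fetch_ forms, e.g. atomic_and() and
 * atomic_fetch_and(), with andnot mapped to the BIC instruction; the
 * kernel's atomic API defines no *_return variants for these ops.
 */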

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }
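/*
 * Example declaration (hypothetical): the 8-byte alignment above is what
 * allows LLOCKD/SCONDD (and plain LDD/STD) to access the counter as one
 * unit.
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 */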

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * an explicit volatile. Otherwise gcc optimizers could elide the
	 * store, which borked the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason: to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc,
	 * since both load/store are explicitly done in inline asm. As long
	 * as the API is used for each access, gcc has no way to optimize
	 * away any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
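/*
 * Note on the instruction pairs above: %L/%H select the low/high 32-bit
 * halves of the 64-bit register pair, so ATOMIC64_OPS(add, add.f, adc)
 * emits a low-word ADD that sets carry (.f) followed by a high-word ADC
 * that consumes it; sub.f/sbc do the same for borrow. The bitwise ops need
 * no flag plumbing and simply repeat the same instruction on both halves.
 */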

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
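/*
 * Typical caller-side pattern built on the primitive above (hypothetical
 * sketch, not part of this file): read, compute, then retry the cmpxchg
 * until no one raced with the update.
 *
 *	s64 old, new;
 *	do {
 *		old = atomic64_read(v);
 *		new = transform(old);		// any pure function of old
 *	} while (atomic64_cmpxchg(v, old, new) != old);
 */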

static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
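/*
 * Hypothetical usage sketch: "take one token if any are left". A negative
 * return value means the counter was already <= 0 and was left untouched.
 *
 *	if (atomic64_dec_if_positive(&tokens) < 0)
 *		return -EBUSY;
 */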

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v.
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
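/*
 * The generic atomic64_add_unless() / atomic64_inc_not_zero() wrappers in
 * <linux/atomic.h> build on this primitive, roughly (illustrative):
 *
 *	atomic64_add_unless(v, a, u)  =>  atomic64_fetch_add_unless(v, a, u) != u
 *	atomic64_inc_not_zero(v)      =>  atomic64_add_unless(v, 1, 0)
 */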

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */