xref: /openbmc/linux/arch/arc/include/asm/atomic-llsc.h (revision 42f51fb2)
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_LLSC_H
#define _ASM_ARC_ATOMIC_LLSC_H

#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

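/*
 * ATOMIC_OP(): non-returning atomic read-modify-write.
 * llock/scond retry loop: scond fails if the counter was updated between
 * the locked load and the conditional store, in which case bnz loops back
 * and the whole sequence is retried.
 */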
#define ATOMIC_OP(op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc", "memory");						\
}									\

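/*
 * ATOMIC_OP_RETURN(): same retry loop, but returns the new (post-op)
 * value. Only the _relaxed form is provided here; the ordered variants
 * are built on top of it by the generic atomic headers.
 */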
#define ATOMIC_OP_RETURN(op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc", "memory");						\
									\
	return val;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed

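/*
 * ATOMIC_FETCH_OP(): returns the pre-op value. The old value is kept in
 * %[orig] while %[val] holds the result that scond tries to store.
 */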
#define ATOMIC_FETCH_OP(op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned int val, orig;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc", "memory");						\
									\
	return orig;							\
}

#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

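/*
 * ATOMIC_OPS() stamps out all three flavours (void, _return_relaxed and
 * fetch_*_relaxed) for the arithmetic ops.
 */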
#define ATOMIC_OPS(op, asm_op)					\
	ATOMIC_OP(op, asm_op)					\
	ATOMIC_OP_RETURN(op, asm_op)				\
	ATOMIC_FETCH_OP(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

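/*
 * Bitwise ops don't need a _return variant, so redefine ATOMIC_OPS()
 * with only the void and fetch flavours. Note that andnot maps to ARC's
 * "bic" (bit-clear) instruction.
 */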
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)					\
	ATOMIC_OP(op, asm_op)					\
	ATOMIC_FETCH_OP(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

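/* andnot is provided by the arch, so no generic fallback is generated. */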
#define arch_atomic_andnot		arch_atomic_andnot

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif