/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
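
/*
 * Usage sketch (editor's illustration, not part of the original header):
 *
 *	static atomic_t refs = ATOMIC_INIT(0);
 *
 *	atomic_set(&refs, 5);
 *	pr_info("refs = %d\n", atomic_read(&refs));
 *
 * READ_ONCE()/WRITE_ONCE() only guarantee single-copy access to the
 * counter; they imply no memory barriers.
 */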

/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
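
/*
 * For example (editor's sketch, not guaranteed codegen): on ColdFire the
 * "d" constraint forces the operand through a data register, so
 * atomic_add(1, v) might compile to
 *
 *	moveq #1,%d0
 *	addl %d0,(%a0)
 *
 * whereas with "di" on classic 68k the compiler is free to emit the
 * immediate-to-memory form directly:
 *
 *	addl #1,(%a0)
 */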

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

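/*
 * Expansion sketch (editor's illustration): ATOMIC_OP(add, +=, add)
 * produces roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * i.e. a single read-modify-write "addl" on the counter in memory, which
 * cannot be interrupted mid-instruction on a uniprocessor.
 */
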
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}
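
/*
 * How the loop above works (editor's note): %2 (tmp) starts out holding
 * the current counter value, courtesy of the "2" (atomic_read(v)) input.
 * Each iteration copies tmp into %1 (t), applies the operation to t, and
 * then "casl" compares *v with tmp: on a match it stores t and the loop
 * exits; on a mismatch it reloads the fresh *v into tmp and "jne 1b"
 * retries. atomic_##op##_return() therefore returns the new value in t.
 */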

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return tmp;							\
}
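
/*
 * Editor's note: the only difference from ATOMIC_OP_RETURN() is the
 * return value. On a successful "casl", tmp still holds what memory
 * contained before the store, so the fetch_ variants return the old
 * value while the _return variants return the new one:
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *	atomic_fetch_add(2, &v);	// returns 3, counter is now 5
 *	atomic_add_return(2, &v);	// returns 7, counter is now 7
 */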

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}
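
/*
 * Expansion sketch (editor's illustration): with c_op "+=", the
 * IRQ-disabled ATOMIC_OP_RETURN(add, +=, add) reduces to
 *
 *	local_irq_save(flags);
 *	t = (v->counter += i);
 *	local_irq_restore(flags);
 *
 * Masking interrupts is sufficient only because, as noted at the top of
 * this file, there are no SMP m68k systems: on a uniprocessor the sole
 * source of concurrency is an interrupt handler.
 */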

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
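
/*
 * Editor's note: the two lines above generate six functions:
 * atomic_add(), atomic_add_return(), atomic_fetch_add(), atomic_sub(),
 * atomic_sub_return() and atomic_fetch_sub().
 */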

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
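
/*
 * Editor's note: the bitwise ops deliberately omit ATOMIC_OP_RETURN();
 * the generic atomic API of this era only needs the void and fetch_
 * variants for and/or/xor. "eor" is the m68k mnemonic for exclusive OR,
 * so atomic_xor() ends up using the "eorl" instruction.
 */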

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
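
/*
 * Usage sketch (editor's illustration): atomic_dec_and_test() returns
 * true only when the decrement took the counter to zero, the classic
 * refcounting pattern:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 *
 * "seq" sets %0 to 0xff if the Z flag is set after the subtract, so
 * "c != 0" turns the condition-code result into a boolean.
 */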

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}
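
/*
 * Editor's note: atomic_dec_and_test_lt() is an m68k-specific helper;
 * "slt" (set if less than) makes it return true when the decremented
 * result is negative, rather than zero as in atomic_dec_and_test().
 */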

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */
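
/*
 * Usage sketch (editor's illustration): both variants return the value
 * the counter held before the call, so the canonical compare-and-swap
 * loop works identically with either implementation:
 *
 *	int old = atomic_read(&v);
 *	for (;;) {
 *		int seen = atomic_cmpxchg(&v, old, old * 2);
 *		if (seen == old)
 *			break;		// swap happened
 *		old = seen;		// lost a race, retry
 *	}
 */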

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
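
/*
 * Usage sketch (editor's illustration): __atomic_add_unless() adds "a"
 * to the counter unless it currently equals "u", and returns the value
 * observed before any addition. The generic atomic_inc_not_zero() helper
 * is built on this, e.g.
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		use(obj);	// got a reference; 0 means obj was dying
 */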

#endif /* __ARCH_M68K_ATOMIC__ */