xref: /openbmc/linux/arch/m68k/include/asm/atomic.h (revision bc5aa3a0)
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
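/*
 * "d" restricts the asm operand to a data register; "di" additionally
 * allows an immediate constant, so on classic 680x0 parts the compiler
 * can encode the value directly in the instruction.
 */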

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}									\

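/*
 * ATOMIC_OP() above emits a single <op>.l to memory.  With no SMP, a
 * single instruction cannot be torn by an interrupt, so plain
 * atomic_add() and friends need no further protection.  The
 * value-returning and fetch variants below come in two flavours: a
 * cas-based retry loop when CONFIG_RMW_INSNS is available, and an
 * interrupt-disabling fallback otherwise.
 */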
#ifdef CONFIG_RMW_INSNS

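/*
 * Retry loop using cas: %2 (tmp) holds the value last seen in memory,
 * %1 (t) the result of the operation.  casl stores t if *v still equals
 * tmp, otherwise it reloads tmp from *v and we try again.  Returns the
 * new value.
 */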
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}

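/*
 * Same cas loop as atomic_<op>_return(), but the value returned is the
 * one observed in memory just before the successful update (tmp).
 */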
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return tmp;							\
}

#else

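/*
 * No usable read-modify-write instructions: on a uniprocessor the only
 * other contender is an interrupt handler, so masking interrupts around
 * the update is sufficient.  The compound assignment yields the new
 * value.
 */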
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

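/*
 * Interrupts-off fallback for the fetch variants: the counter is read
 * before the update and that pre-update value is returned.
 */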
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
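/*
 * The two invocations above expand to atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_fetch_add() and
 * atomic_fetch_sub().
 */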

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
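/*
 * The bitwise ops get no *_return flavour, only atomic_and(),
 * atomic_or(), atomic_xor() and their atomic_fetch_*() counterparts.
 * Note that xor maps to the m68k "eor" mnemonic.
 */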

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

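/*
 * addq/subq encode the constant 1 directly in the instruction, so these
 * are again single, uninterruptible read-modify-write accesses.
 */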
static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

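/*
 * The *_test() helpers below rely on the m68k Scc instructions: seq,
 * slt and smi set the destination byte to 0xff when the condition
 * (zero, less-than, minus) holds after the preceding add/sub, and to
 * 0x00 otherwise, hence the "c != 0" conversions.
 */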
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

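/*
 * With CONFIG_RMW_INSNS (set only on CPUs that provide cas, i.e. 68020
 * and later), these map straight onto cmpxchg()/xchg() from
 * <asm/cmpxchg.h>.  Without it, the interrupt-disabling versions below
 * provide the same semantics on a uniprocessor.
 */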
#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

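/*
 * Add "a" to v, but only if v was not already equal to "u".  Returns the
 * value v had before the attempt, so the caller can tell whether the add
 * happened (return value != u).  Built on the atomic_cmpxchg() retry
 * pattern above.
 */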
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#endif /* __ARCH_M68K_ATOMIC__ */