xref: /openbmc/linux/arch/m68k/include/asm/atomic.h (revision a2cce7a9)
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

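/*
 * Usage sketch, not part of this header: a counter is declared with
 * ATOMIC_INIT() and then only touched through the atomic_*() helpers.
 * "example_count" is an illustrative name.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	pr_info("count = %d\n", atomic_read(&example_count));
 */
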
/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

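/*
 * For reference, ATOMIC_OP(add, +=, add) expands (modulo whitespace)
 * to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 */
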
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}

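/*
 * In C-like pseudocode the cas loop above is roughly:
 *
 *	tmp = atomic_read(v);
 *	do {
 *		t = tmp c_op i;
 *	} while (casl fails);	(on failure casl reloads tmp from *v)
 *	return t;
 *
 * casl writes t into v->counter only if the counter still holds tmp,
 * so the read-modify-write is atomic without disabling interrupts.
 */
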
#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)

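/*
 * The expansions above provide atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return(), atomic_and(), atomic_or() and
 * atomic_xor(); the bitwise ops get no *_return variants here.
 */
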
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

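/*
 * Usage sketch, not part of this header: atomic_dec_and_test() is the
 * classic reference-count release pattern.  "example_obj" and
 * example_free() are illustrative names.
 *
 *	if (atomic_dec_and_test(&example_obj->refcnt))
 *		example_free(example_obj);
 */
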
/* Decrement @v and test whether the new value is negative. */
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

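/*
 * Usage sketch, not part of this header: atomic_xchg() can implement
 * a simple claim-once flag.  "example_claimed" and example_do_once()
 * are illustrative names.
 *
 *	static atomic_t example_claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&example_claimed, 1) == 0)
 *		example_do_once();
 */
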
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

/*
 * Add @a to @v, unless @v already holds @u; returns the old value of
 * @v either way.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

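/*
 * Usage sketch: in the generic atomic code, atomic_inc_not_zero(v) is
 * typically built on this helper as atomic_add_unless(v, 1, 0), i.e.
 * take a reference only while the count is still non-zero.
 * "example_obj" is an illustrative name.
 *
 *	if (__atomic_add_unless(&example_obj->refcnt, 1, 0) != 0)
 *		return example_obj;	(got a reference)
 */
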
#endif /* __ARCH_M68K_ATOMIC__ */