/* xref: /openbmc/linux/arch/alpha/include/asm/atomic.h (revision 12eb4683) */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible, since they are much slower
 * than regular operations.
 */
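
/*
 * Illustrative usage sketch (the names below are hypothetical, not
 * part of this header): a statistics counter updated from several
 * CPUs without a lock.
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	atomic_add(1, &hits);		// or atomic_inc(&hits), see below
 *	printk("hits so far: %d\n", atomic_read(&hits));
 */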

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
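
/*
 * The loops below all follow the same load-locked/store-conditional
 * pattern.  In rough C-like pseudocode (illustrative only;
 * store_conditional() is a stand-in for stq_c/stl_c, not a real
 * primitive), this is what each routine does:
 *
 *	do {
 *		temp = v->counter;	// ldl_l: load and arm the lock flag
 *		temp += i;		// operate on the loaded value
 *	} while (!store_conditional(&v->counter, temp));
 *					// stl_c: store only if no other CPU
 *					// touched the location in between
 */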

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
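
/*
 * Unlike the void variants above, the *_return routines are fully
 * ordered: note the smp_mb() on both sides of the ll/sc loop.
 * Illustrative sketch (hypothetical names, not part of this header):
 * handing out unique sequence numbers across CPUs.
 *
 *	static atomic_t next_seq = ATOMIC_INIT(0);
 *
 *	int seq = atomic_add_return(1, &next_seq);	// first caller sees 1
 */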

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
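
/*
 * atomic_cmpxchg() enables lock-free read-modify-write operations
 * beyond plain add/sub.  A minimal sketch, assuming a hypothetical
 * atomic_max() helper (not provided by this header):
 *
 *	static void atomic_max(atomic_t *v, int new)
 *	{
 *		int old = atomic_read(v);
 *		while (old < new) {
 *			int seen = atomic_cmpxchg(v, old, new);
 *			if (seen == old)
 *				break;		// we won the race
 *			old = seen;		// lost; retry with fresh value
 *		}
 *	}
 */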

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
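
/*
 * The classic use of __atomic_add_unless() is the atomic_add_unless()/
 * atomic_inc_not_zero() idiom: take a reference only while the object
 * is still live.  Sketch with hypothetical names:
 *
 *	// Nonzero return: we got a reference.  Zero return: the count
 *	// was already zero, i.e. the object is being torn down.
 *	if (__atomic_add_unless(&obj->refs, 1, 0) != 0)
 *		use(obj);
 */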

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of @v minus 1, even if @v was not
 * decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
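
/*
 * atomic64_dec_if_positive() is handy for consuming from a counted
 * budget that must never go negative.  Sketch with a hypothetical
 * credit counter:
 *
 *	static atomic64_t credits = ATOMIC64_INIT(16);
 *
 *	if (atomic64_dec_if_positive(&credits) >= 0)
 *		send_packet();		// consumed one credit
 *	// a negative return means no credit was available;
 *	// the counter was left untouched
 */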

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
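
/*
 * atomic_dec_and_test() is the standard building block for reference
 * counting: exactly one caller sees the count reach zero and performs
 * the teardown.  Sketch with hypothetical names:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refs))
 *			obj_free(o);	// only the last reference frees
 *	}
 */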

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */