/* arch/alpha/include/asm/atomic.h */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

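/*
 * atomic_read() casts through volatile so the compiler performs a real
 * load each time; atomic_set() is a plain store.  Neither implies any
 * memory barrier on its own.
 */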
#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
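/*
 * Each operation below is a load-locked/store-conditional retry loop
 * (ldl_l/stl_c for 32-bit, ldq_l/stq_c for 64-bit).  The store-conditional
 * writes 0 into its register if the lock flag was lost, so "beq %0,2f"
 * takes the out-of-line branch and jumps back to retry.  A rough C-level
 * sketch of atomic_add() (load_locked/store_conditional are illustrative
 * names only, not real kernel helpers):
 *
 *	do {
 *		old = load_locked(&v->counter);
 *	} while (!store_conditional(&v->counter, old + i));
 */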

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
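/*
 * The *_return variants are bracketed by smp_mb() on both sides, so they
 * act as full memory barriers, unlike the void add/sub operations above.
 */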
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

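/*
 * cmpxchg() and xchg() come from <asm/cmpxchg.h>; the wrappers below
 * simply apply them to the counter field of the atomic types.
 */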
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
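/*
 * Typical use is a "take a reference unless the count already hit zero"
 * pattern, e.g. (illustrative only, obj->refcnt is a hypothetical field):
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	/\* object already being torn down *\/
 *
 * The caller compares the returned old value against @u to learn whether
 * the add actually happened.
 */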


/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
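/*
 * Note the difference from __atomic_add_unless() above: this returns a
 * boolean (non-zero if the add happened), not the old value, which is
 * what atomic64_inc_not_zero() below relies on.
 */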

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

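/*
 * atomic_inc()/atomic_dec() above are not barriers themselves, so the
 * before/after ordering hooks expand to full smp_mb()s on Alpha.
 */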
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */