xref: /openbmc/linux/arch/parisc/include/asm/atomic.h (revision 293d5b43)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
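
/*
 * Illustrative only (assuming L1_CACHE_BYTES == 64 for the arithmetic):
 * addresses 0x1000 and 0x1020 share a cacheline and hash to the same
 * slot ((0x1000 / 64) & 3 == 0), while 0x1040 picks the next slot
 * ((0x1040 / 64) & 3 == 1).  Two atomic_ts therefore only contend on
 * the same lock when they share a cacheline or collide modulo
 * ATOMIC_HASH_SIZE.
 */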

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */
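
/*
 * atomic_set() below still takes the hash lock, though: if a plain
 * store could land between the read and the write-back of a locked
 * read-modify-write (e.g. atomic_add()), the RMW's write-back would
 * silently overwrite the store.  Serialising the store on the same
 * lock avoids that lost update.
 */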

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
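
/*
 * Illustrative use (hypothetical 'refs' counter): the generic
 * atomic_add_unless() wrapper in <linux/atomic.h> compares this
 * return value against @u, e.g.
 *
 *	if (__atomic_add_unless(&refs, 1, 0) != 0)
 *		... got a reference ...
 *
 * which is the pattern behind atomic_inc_not_zero().
 */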

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
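
/*
 * The two expansions above generate atomic_add()/atomic_sub(),
 * atomic_add_return()/atomic_sub_return() and
 * atomic_fetch_add()/atomic_fetch_sub(), each performing its
 * update under _atomic_spin_lock_irqsave().
 */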

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
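
/*
 * The bitwise ops only come in plain and fetch flavours:
 * atomic_and()/atomic_or()/atomic_xor() and their atomic_fetch_*()
 * counterparts; no *_return variants are generated.
 */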

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
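
/*
 * Typical (illustrative, hypothetical names) use of ATOMIC_INIT for a
 * statically initialised counter:
 *
 *	static atomic_t hypothetical_count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&hypothetical_count);
 *	if (atomic_dec_and_test(&hypothetical_count))
 *		... last user went away ...
 */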

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed (@v was not @u),
 * zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
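
/*
 * Illustrative (hypothetical) use: consume from a budget only while it
 * is still positive, as some refcounting/quota schemes do:
 *
 *	if (atomic64_dec_if_positive(&hypothetical_budget) < 0)
 *		... nothing left to consume ...
 */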

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */