/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * The hash function indexes into a different spinlock for each atomic_t.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

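/*
 * Illustrative sketch (not part of the original header): assuming, purely
 * for the sake of the example, that L1_CACHE_BYTES == 64, two atomic_ts
 * that share a cacheline hash to the same lock, while more distant ones
 * spread across the array:
 *
 *	ATOMIC_HASH(0x1008) -> &__atomic_hash[(0x1008 / 64) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1030) -> &__atomic_hash[(0x1030 / 64) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1040) -> &__atomic_hash[(0x1040 / 64) & 3] == &__atomic_hash[1]
 */
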
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
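
/*
 * Illustrative sketch (not part of the original header): every
 * read-modify-write helper below follows the same pattern, with the UP
 * variants of these macros degenerating to plain irq save/restore:
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... modify v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */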

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
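
/*
 * Illustrative sketch (not part of the original header): the classic use
 * of __atomic_add_unless() is taking a reference only while the count is
 * still non-zero.  The object and field names are hypothetical:
 *
 *	static bool example_get_ref(struct example_obj *obj)
 *	{
 *		return __atomic_add_unless(&obj->refcnt, 1, 0) != 0;
 *	}
 */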

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
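
/*
 * Illustrative sketch (not part of the original header): each
 * ATOMIC_OPS(op, c_op) line above expands to three helpers, e.g. for add:
 *
 *	void atomic_add(int i, atomic_t *v);		add, no return value
 *	int  atomic_add_return(int i, atomic_t *v);	add, return new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	add, return old value
 *
 * and likewise atomic_sub/atomic_sub_return/atomic_fetch_sub.
 */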

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
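
/*
 * Illustrative sketch (not part of the original header): the bitwise
 * variants get atomic_and/or/xor plus the fetch_* forms, but no *_return
 * forms.  A hypothetical flag-setting use looks like:
 *
 *	#define EXAMPLE_FLAG_BUSY	0x1
 *
 *	if (atomic_fetch_or(EXAMPLE_FLAG_BUSY, &state) & EXAMPLE_FLAG_BUSY)
 *		... someone else already set the flag ...
 */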

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
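
/*
 * Illustrative sketch (not part of the original header): a typical
 * reference-count "put" built on these macros.  The struct, field, and
 * example_free() names are hypothetical:
 *
 *	struct example_obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static struct example_obj example = { .refcnt = ATOMIC_INIT(1) };
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			example_free(obj);
 *	}
 */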

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the addition was done, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
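
/*
 * Illustrative sketch (not part of the original header):
 * atomic64_inc_not_zero() is the 64-bit counterpart of the "take a
 * reference only if the object is still live" idiom.  Names are
 * hypothetical:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt64))
 *		return NULL;
 *
 * A zero return means the count was already zero and the object is
 * being torn down.
 */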

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
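
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * counting-resource "try take" built on atomic64_dec_if_positive(), which
 * only consumes a slot while some remain:
 *
 *	static bool example_try_take_slot(atomic64_t *slots)
 *	{
 *		return atomic64_dec_if_positive(slots) >= 0;
 *	}
 *
 * A negative return means the count was already zero and nothing was
 * decremented.
 */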

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */