xref: /openbmc/linux/arch/parisc/include/asm/atomic.h (revision e190bfe5)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
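
/* Illustrative sketch (not part of the original header): ATOMIC_HASH() picks
 * one of the ATOMIC_HASH_SIZE locks based on the cacheline the atomic_t
 * lives in.  Assuming L1_CACHE_BYTES == 64, for example:
 *
 *	atomic_t *a = (atomic_t *) 0x1000;	// hypothetical addresses
 *	atomic_t *b = (atomic_t *) 0x1010;	// same cacheline as a
 *	atomic_t *c = (atomic_t *) 0x1040;	// next cacheline
 *
 *	ATOMIC_HASH(a) == ATOMIC_HASH(b)	// share a lock
 *	ATOMIC_HASH(a) != ATOMIC_HASH(c)	// different lock here
 *
 * atomic_ts in the same cacheline always share a lock; atomic_ts in
 * different cachelines may still collide, since only the low two index
 * bits are used.
 */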

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
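
/* Illustrative usage (not part of the original header): xchg() stores a new
 * value and returns what was there before; the width is picked from
 * sizeof(*(ptr)), so only 1-, 4- and (on 64-bit) 8-byte objects are valid:
 *
 *	static int lock_flag;			// hypothetical variable
 *	int was_held = xchg(&lock_flag, 1);	// set to 1, get old value back
 *
 * Any other size ends up calling __xchg_called_with_bad_pointer() and
 * fails to link.
 */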


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
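
/* Illustrative usage (not part of the original header): cmpxchg() writes the
 * new value only if the current value still matches the expected one, and
 * always returns the value it found.  A typical retry loop:
 *
 *	static unsigned int count;		// hypothetical variable
 *	unsigned int old, cur = count;
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&count, old, old + 1);
 *	} while (cur != old);
 *
 * The update succeeded when the returned value equals the expected one.
 */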

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4:	return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)				  	\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
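
/* Illustrative note (not part of the original header): cmpxchg_local() need
 * only be atomic against other contexts on the same CPU (e.g. interrupts);
 * it implies no ordering against other CPUs.  Usage mirrors cmpxchg():
 *
 *	unsigned long old = *p;		// p: hypothetical pointer to CPU-local data
 *	cmpxchg_local(p, old, old + 1);
 */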
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
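
/* Illustrative note (not part of the original header): __atomic_add_return()
 * returns the value *after* the addition, so the *_return wrappers below
 * behave like:
 *
 *	atomic_t v = ATOMIC_INIT(2);		// hypothetical variable
 *	atomic_add_return(3, &v);		// v.counter == 5, returns 5
 */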

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
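
/* Illustrative usage (not part of the original header): the classic use of
 * atomic_inc_not_zero() is taking a reference only while the object is
 * still live:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))	// hypothetical refcount field
 *		use(obj);			// got a reference
 *	else
 *		;				// already dying, don't touch it
 */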

#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })
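
/* Illustrative usage (not part of the original header): ATOMIC_INIT() is for
 * static/compile-time initialisation; atomic_set() is the runtime
 * equivalent:
 *
 *	static atomic_t users = ATOMIC_INIT(0);	// hypothetical counter
 *	atomic_inc(&users);
 *	if (atomic_dec_and_test(&users))
 *		;				// last user went away
 */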

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#else /* CONFIG_64BIT */

#include <asm-generic/atomic64.h>

#endif /* !CONFIG_64BIT */

#include <asm-generic/atomic-long.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */