/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
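
/* For example: with ATOMIC_HASH_SIZE == 4, every atomic_t within a given
 * L1 cacheline hashes to the same lock, and the two low-order bits of the
 * cacheline index select one of the four locks in __atomic_hash[].
 */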

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
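
/* Every atomic op below takes the hashed lock with local interrupts
 * disabled, so a given counter is only ever modified while the lock
 * for its cacheline is held.
 */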


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
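/* On UP there is no other CPU to race with, so disabling local
 * interrupts is enough to make the read-modify-write sequences atomic.
 */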
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

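/* Each ATOMIC_OPS(op) below expands to arch_atomic_<op>() (no return
 * value), arch_atomic_<op>_return() (returns the new value) and
 * arch_atomic_fetch_<op>() (returns the old value).
 */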
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
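/* The bitwise ops only get the plain and fetch_ forms; the generic
 * atomic API defines no <op>_return variant for and/or/xor.
 */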

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

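/* As above: each ATOMIC64_OPS(op) expands to arch_atomic64_<op>(),
 * arch_atomic64_<op>_return() and arch_atomic64_fetch_<op>().
 */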
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#define arch_atomic64_add_return	arch_atomic64_add_return
#define arch_atomic64_sub_return	arch_atomic64_sub_return
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */