/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
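
/*
 * Illustrative sketch (not compiled into the kernel) of how
 * ATOMIC_HASH distributes counters across the lock array; the
 * names below are hypothetical, for exposition only.
 */
#if 0
static atomic_t counters[2];	/* 4-byte counters, adjacent in memory */

static void example_hash_locality(void)
{
	/*
	 * Adjacent counters almost always share an L1 cacheline and
	 * therefore hash to the same lock; counters at least
	 * L1_CACHE_BYTES apart may hash to different locks, modulo
	 * the ATOMIC_HASH_SIZE wrap-around.
	 */
	arch_spinlock_t *s0 = ATOMIC_HASH(&counters[0]);
	arch_spinlock_t *s1 = ATOMIC_HASH(&counters[1]);

	/* Typically s0 == s1 here; at most 4 distinct locks exist. */
	(void)s0;
	(void)s1;
}
#endif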

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define arch_atomic_cmpxchg(v, o, n)	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
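
/*
 * Illustrative sketch (not compiled into the kernel): a bounded
 * increment built on arch_atomic_cmpxchg().  example_add_below() is
 * a hypothetical name, shown only to demonstrate the usual
 * compare-and-swap retry loop.
 */
#if 0
static int example_add_below(atomic_t *v, int max)
{
	int old = arch_atomic_read(v);

	while (old < max) {
		/* Returns the value actually seen in v->counter. */
		int seen = arch_atomic_cmpxchg(v, old, old + 1);

		if (seen == old)
			return 1;	/* our increment was installed */
		old = seen;		/* lost a race; retry with the fresh value */
	}
	return 0;			/* ceiling reached, nothing stored */
}
#endif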

#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
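
/*
 * For reference, ATOMIC_OPS(add, +=) above generates arch_atomic_add(),
 * arch_atomic_add_return() and arch_atomic_fetch_add(); the first of
 * these expands to roughly:
 *
 *	static __inline__ void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		v->counter += i;
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 */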

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }
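
/*
 * Static initializer, e.g. (with a hypothetical variable name):
 *
 *	static atomic64_t total_bytes = ATOMIC64_INIT(0);
 */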

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define arch_atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */