xref: /openbmc/linux/include/asm-generic/atomic.h (revision e3b9f1e8)
/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * atomic_$op() - $op integer to atomic variable
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does not imply a memory barrier; use
 * smp_mb__{before,after}_atomic() where ordering is required.
 */

/*
 * atomic_$op_return() - $op integer to atomic variable and return the result
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Implies a full memory barrier.
 */
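
/*
 * atomic_fetch_$op() - $op integer to atomic variable and return the old value
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v and returns the value @v held beforehand.
 * Implies a full memory barrier.
 */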

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

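/*
 * Each operation below reads the counter, computes the new value, and
 * attempts to install it with cmpxchg(); if another CPU changed the
 * counter in the meantime, cmpxchg() returns the fresh value and the
 * loop retries with that.
 */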
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c c_op i;						\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c;							\
}
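
/*
 * Note the difference: atomic_$op_return() returns the new value of the
 * counter, while atomic_fetch_$op() returns the value it held beforehand.
 */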

#else

#include <linux/irqflags.h>

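/*
 * On UP, the only things that can interleave with these operations are
 * interrupt handlers, so briefly disabling local interrupts around the
 * read-modify-write is enough to make it atomic. Note this offers no
 * protection against NMIs.
 */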
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = (v->counter = v->counter c_op i);				\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = v->counter;						\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#endif /* CONFIG_SMP */
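
/*
 * An architecture can provide its own optimised version of any of these
 * operations before including this header; the #ifndef guards below only
 * instantiate the generic fallback when no such override exists.
 */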

#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif

#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif

#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#endif

#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#endif

#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#endif

#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#endif

#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
#endif

#ifndef atomic_and
ATOMIC_OP(and, &)
#endif

#ifndef atomic_or
ATOMIC_OP(or, |)
#endif

#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif

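/*
 * The helper macros have served their purpose; undefine them so they do
 * not leak into every file that includes this header.
 */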
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }
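
/*
 * For example, a statically initialised counter (the name is illustrative):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 */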

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	READ_ONCE((v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#include <linux/irqflags.h>

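/*
 * Everything below is built on the *_return primitives instantiated
 * above: atomic_add_negative() tests the sign of the result, and the
 * void add/sub/inc/dec forms simply discard it.
 */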
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

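/*
 * atomic_xchg() and atomic_cmpxchg() simply forward to the xchg() and
 * cmpxchg() primitives pulled in from <asm/cmpxchg.h> above.
 */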
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

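/**
 * __atomic_add_unless - add to an atomic variable unless it has a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v; callers compare it against @u to tell
 * whether the addition happened.
 */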
#ifndef __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}
#endif

#endif /* __ASM_GENERIC_ATOMIC_H */