/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1UL << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * switches the ref back to atomic mode, collecting the per-cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must not be called more than once, so if multiple
 * paths can initiate teardown the caller needs its own mechanism (e.g. a dead
 * flag or a lock) to serialize it.
 */
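
/*
 * A minimal, illustrative sketch of the lifecycle described above (struct foo
 * and foo_release() are hypothetical names, not an in-tree user):
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	creation (the ref starts out as 1, the initial ref):
 *
 *		if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *			return -ENOMEM;
 *
 *	fast-path users take and drop temporary refs:
 *
 *		percpu_ref_get(&foo->ref);
 *		...
 *		percpu_ref_put(&foo->ref);
 *
 *	shutdown (switches to atomic mode and drops the initial ref;
 *	foo_release() runs once the last temporary ref is put):
 *
 *		percpu_ref_kill(&foo->ref);
 */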

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before use.  Implies INIT_ATOMIC.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};
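
/*
 * For example (an illustrative sketch; "foo" and foo_release() are
 * hypothetical, not an in-tree user), an object that must not hand out
 * references until it is fully set up can start its ref dead and revive it
 * once initialization is complete:
 *
 *	if (percpu_ref_init(&foo->ref, foo_release, PERCPU_REF_INIT_DEAD,
 *			    GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	... finish setting up foo ...
 *
 *	percpu_ref_reinit(&foo->ref);
 *
 * After percpu_ref_reinit() the ref is 1 again and lookups using
 * percpu_ref_tryget_live() can succeed.
 */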

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bits of the pointer hold the __PERCPU_REF_* flags; if
	 * __PERCPU_REF_ATOMIC is set, the ref is in atomic mode and get/put
	 * manipulate @count instead of the percpu counters.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);
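
/*
 * Mode switching sketch (illustrative only; "foo", foo->atomic_done and
 * foo_ref_atomic() are hypothetical, with foo->atomic_done assumed to be a
 * struct completion set up with init_completion()): a caller that needs all
 * the per-cpu counts collected into the atomic counter can switch modes and
 * wait for the confirmation callback before looking at the ref:
 *
 *	static void foo_ref_atomic(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->atomic_done);
 *	}
 *
 *	reinit_completion(&foo->atomic_done);
 *	percpu_ref_switch_to_atomic(&foo->ref, foo_ref_atomic);
 *	wait_for_completion(&foo->atomic_done);
 *
 *	... e.g. check percpu_ref_is_zero(&foo->ref) ...
 *
 *	percpu_ref_switch_to_percpu(&foo->ref);
 */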

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't simply return the pointer for the caller to test for
 * NULL because doing so would force the compiler to generate two
 * conditional branches, as it can't assume that @ref->percpu_count is
 * not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
	smp_read_barrier_depends();

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}
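
/*
 * Illustrative sketch (hypothetical "foo" and nr_items): when submitting a
 * batch of work items that each hold a reference, the refs can be taken and
 * dropped in bulk instead of one at a time:
 *
 *	percpu_ref_get_many(&foo->ref, nr_items);
 *	submit nr_items work items, each of which does a single
 *	percpu_ref_put(&foo->ref) when it completes;
 *
 * or, if the items are torn down in bulk as well:
 *
 *	percpu_ref_put_many(&foo->ref, nr_items);
 */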

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
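
/*
 * Illustrative sketch (foo_idr, foo_lookup() and foo_confirm_kill() are
 * hypothetical names): a lookup path that must never hand out a new
 * reference once shutdown has started can pair percpu_ref_tryget_live()
 * with percpu_ref_kill_and_confirm():
 *
 *	struct foo *foo_lookup(unsigned long id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !percpu_ref_tryget_live(&foo->ref))
 *			foo = NULL;
 *		rcu_read_unlock();
 *
 *		return foo;
 *	}
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		once this runs, foo_lookup() is guaranteed to fail for this
 *		object, so it is safe to tear down whatever the lookups need
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 */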

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if it drops to 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it drops to 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}

#endif