/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you drop the initial refcount with percpu_ref_kill()
 * instead of percpu_ref_put().
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  Once the switch to
 * atomic_t mode completes, the initial ref is dropped (see percpu_ref_kill()).
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()), and the initial ref is
 * dropped by percpu_ref_kill() itself.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called precisely once, so the caller needs
 * some other mechanism to synchronize teardown.  A minimal sketch of the whole
 * pattern follows below.
 */
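
/*
 * A minimal sketch of the pattern described above.  "struct foo",
 * foo_release() and the call sites are hypothetical, not taken from
 * fs/aio.c:
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 *
 *	Creation takes the initial ref:
 *
 *		if (percpu_ref_init(&foo->ref, foo_release))
 *			return -ENOMEM;
 *
 *	Hot paths then use cheap percpu gets and puts:
 *
 *		percpu_ref_get(&foo->ref);
 *		...
 *		percpu_ref_put(&foo->ref);
 *
 *	Teardown, after unpublishing foo so no new users can find it:
 *
 *		percpu_ref_kill(&foo->ref);
 *
 *	foo_release() runs once all outstanding gets have been put.
 */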

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

struct percpu_ref {
	atomic_t		count;
	/*
	 * The low bit of the pointer (PCPU_REF_DEAD) indicates whether the
	 * ref is still in percpu mode; if set, get/put fall back to
	 * manipulating the atomic_t.
	 */
	unsigned long		pcpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_kill;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release);
void percpu_ref_reinit(struct percpu_ref *ref);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
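
/*
 * A sketch of how init and exit pair up ("struct foo" and foo_release() are
 * hypothetical).  percpu_ref_exit() frees the percpu counter; calling it
 * from the release callback is one natural place, provided the ref is no
 * longer in use by then:
 *
 *	int foo_create(struct foo *foo)
 *	{
 *		return percpu_ref_init(&foo->ref, foo_release);
 *	}
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 */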

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}

#define PCPU_REF_DEAD		1

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->pcpu_count_ptr is not NULL.
 */
static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
				    unsigned __percpu **pcpu_countp)
{
	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);

	/* paired with smp_store_release() in percpu_ref_reinit() */
	smp_read_barrier_depends();

	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
		return false;

	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
	return true;
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_inc().
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_inc(*pcpu_count);
	else
		atomic_inc(&ref->count);

	rcu_read_unlock_sched();
}
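
/*
 * Because percpu_ref_get() never checks for the count hitting 0, it is only
 * safe when the caller already holds a reference.  A (hypothetical) example
 * is handing a reference off to an async worker that puts it when done:
 *
 *	percpu_ref_get(&foo->ref);	 caller holds one ref, gives one away
 *	queue_work(foo_wq, &foo->work);	 the work function does percpu_ref_put()
 */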

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	} else {
		ret = atomic_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
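
/*
 * A typical (hypothetical) use is a lookup path where the table keeps @ref
 * accessible under rcu_read_lock() but entries may be mid-teardown, so the
 * get has to be conditional:
 *
 *	struct foo *foo_lookup(int id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !percpu_ref_tryget(&foo->ref))
 *			foo = NULL;
 *		rcu_read_unlock();
 *
 *		return foo;
 *	}
 */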

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	}

	rcu_read_unlock_sched();

	return ret;
}
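
/*
 * One way to use the confirm_kill guarantee (a sketch; the names and the
 * completion member are illustrative): block teardown until
 * percpu_ref_tryget_live() is known to fail, so no new users can slip in
 * while draining:
 *
 *	static void foo_q_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo_q *q = container_of(ref, struct foo_q, ref);
 *
 *		complete(&q->kill_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&q->ref, foo_q_confirm_kill);
 *	wait_for_completion(&q->kill_done);
 *	(from here on, percpu_ref_tryget_live(&q->ref) always fails)
 */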

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_dec(*pcpu_count);
	else if (unlikely(atomic_dec_and_test(&ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	if (__pcpu_ref_alive(ref, &pcpu_count))
		return false;
	return !atomic_read(&ref->count);
}
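
/*
 * Mainly useful for sanity checks, e.g. (hypothetical) verifying that all
 * refs are gone before reusing a killed ref:
 *
 *	WARN_ON_ONCE(!percpu_ref_is_zero(&foo->ref));
 *	percpu_ref_reinit(&foo->ref);
 */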

#endif /* _LINUX_PERCPU_REFCOUNT_H */