// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't
 * try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine: when we go to shut
 * down, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
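
/*
 * Illustrative sketch, not part of the original file: the modular-arithmetic
 * argument above, demonstrated with deliberately tiny 8-bit "per-cpu"
 * counters.  All names here are made up for illustration.
 */
static void __maybe_unused percpu_ref_wrap_demo(void)
{
	u8 cpu0_count = 0, cpu1_count = 0;	/* two fake per-cpu counters */
	int i;

	/* 300 gets on "cpu0" and 300 puts on "cpu1": both 8-bit counters wrap */
	for (i = 0; i < 300; i++)
		cpu0_count++;
	for (i = 0; i < 300; i++)
		cpu1_count--;

	/* yet the modular sum is exactly what a single counter would hold: 0 */
	WARN_ON((u8)(cpu0_count + cpu1_count) != 0);
}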

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		start_count += PERCPU_COUNT_BIAS;

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	ref->confirm_switch = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
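
/*
 * Illustrative sketch, not part of the original file: a hypothetical object
 * embedding a percpu_ref and initializing it in percpu mode.  struct foo,
 * foo_release() and foo_create() are made-up names.
 */
struct foo {
	struct percpu_ref ref;
	wait_queue_head_t release_wq;	/* woken once foo_release() has run */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* must not sleep: may be invoked from RCU callback context */
	wake_up_all(&foo->release_wq);
}

static int __maybe_unused foo_create(struct foo *foo)
{
	init_waitqueue_head(&foo->release_wq);

	/* @flags == 0: start live, in percpu mode, with a refcount of 1 */
	return percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
}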

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
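
/*
 * Illustrative sketch, not part of the original file: the init failure path
 * mentioned above, reusing the hypothetical struct foo from the earlier
 * sketch.  foo_setup_rest() is a made-up stub standing in for the rest of
 * the embedding object's initialization.
 */
static int foo_setup_rest(struct foo *foo)
{
	return 0;	/* stand-in for further initialization of @foo */
}

static int __maybe_unused foo_create_full(struct foo *foo)
{
	int ret;

	init_waitqueue_head(&foo->release_wq);

	ret = percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = foo_setup_rest(foo);
	if (ret) {
		/* the ref was never used; undo percpu_ref_init() directly */
		percpu_ref_exit(&foo->ref);
		return ret;
	}

	return 0;
}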

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * A non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress.  Use the noop callback if none was specified.
	 */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion.  If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
			    percpu_ref_switch_lock);

	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
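
/*
 * Illustrative sketch, not part of the original file: asking for atomic mode
 * on the hypothetical foo object and getting a callback once every CPU is
 * guaranteed to operate on the shared atomic counter.  foo_atomic_done() is a
 * made-up name; it runs from RCU callback context and must not block.
 */
static void foo_atomic_done(struct percpu_ref *ref)
{
	pr_debug("(%ps) now in atomic mode", ref->release);
}

static void __maybe_unused foo_force_atomic(struct foo *foo)
{
	percpu_ref_switch_to_atomic(&foo->ref, foo_atomic_done);
}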

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete.  Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
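
/*
 * Illustrative sketch, not part of the original file: the synchronous variant,
 * for a hypothetical caller that must not proceed until the switch has taken
 * effect everywhere.  May sleep, so process context only.
 */
static void __maybe_unused foo_quiesce(struct foo *foo)
{
	percpu_ref_switch_to_atomic_sync(&foo->ref);
	/* from here on, every get/put hits the shared atomic counter */
}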

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
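
/*
 * Illustrative sketch, not part of the original file: clearing the sticky
 * atomic state on the hypothetical foo object once fast-path percpu operation
 * is wanted again.
 */
static void __maybe_unused foo_unquiesce(struct foo *foo)
{
	percpu_ref_switch_to_percpu(&foo->ref);
}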

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
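
/*
 * Illustrative sketch, not part of the original file: a common teardown
 * pattern for the hypothetical foo object - drop the initial ref, then wait
 * for the last reference to go away before tearing the object down.
 * foo_confirm_kill() and foo_shutdown() are made-up names.
 */
static void foo_confirm_kill(struct percpu_ref *ref)
{
	/* every CPU now sees the ref as dead; tryget_live() fails from here */
}

static void __maybe_unused foo_shutdown(struct foo *foo)
{
	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);

	/* foo_release() wakes release_wq once the last reference is dropped */
	wait_event(foo->release_wq, percpu_ref_is_zero(&foo->ref));

	percpu_ref_exit(&foo->ref);
}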

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
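
/*
 * Illustrative sketch, not part of the original file: reviving the
 * hypothetical foo object for another use cycle after its refcount has
 * dropped to zero following a kill, but before percpu_ref_exit().
 */
static void __maybe_unused foo_reuse(struct foo *foo)
{
	/* only legal once the ref has reached zero but has not been exited */
	percpu_ref_reinit(&foo->ref);
	/* the ref is live again with a refcount of 1 */
}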

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
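
/*
 * Illustrative sketch, not part of the original file: cancelling an
 * in-progress teardown of the hypothetical foo object.  Unlike
 * percpu_ref_reinit(), this is legal while references are still in flight,
 * as long as the ref is dead and has not been exited.
 */
static void __maybe_unused foo_cancel_shutdown(struct foo *foo)
{
	percpu_ref_resurrect(&foo->ref);
}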
397