xref: /openbmc/linux/lib/percpu-refcount.c (revision 275876e2)
#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
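/*
 * A minimal usage sketch of the typical lifecycle, assuming a hypothetical
 * embedding object (struct foo) and release callback (foo_release(), shown
 * with percpu_ref_exit() further below):
 *
 *	percpu_ref_init(&foo->ref, foo_release);	// refcount starts at 1
 *
 *	percpu_ref_get(&foo->ref);	// cheap percpu increment
 *	percpu_ref_put(&foo->ref);	// cheap percpu decrement
 *
 *	percpu_ref_kill(&foo->ref);	// drop the initial ref; switches to
 *					// atomic mode, foo_release() runs once
 *					// all outstanding refs are put
 *
 * Until percpu_ref_kill(), get/put never touch the shared atomic_t and stay
 * percpu-fast.
 */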

#define PCPU_COUNT_BIAS		(1U << 31)

static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
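/*
 * A hedged sketch of the usual error handling around percpu_ref_init();
 * foo_setup_rest() and struct foo are hypothetical:
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release);
 *	if (ret)
 *		return ret;			// -ENOMEM from alloc_percpu()
 *
 *	ret = foo_setup_rest(foo);
 *	if (ret)
 *		percpu_ref_exit(&foo->ref);	// undo the init on failure
 *	return ret;
 */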

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init().  @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
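/*
 * A hedged sketch of the kill/reinit cycle, assuming the release callback
 * does not free the embedding object and foo_wait_for_zero() is a
 * hypothetical way of waiting until the refcount has actually hit 0:
 *
 *	percpu_ref_kill(&foo->ref);
 *	foo_wait_for_zero(foo);			// e.g. a completion fired
 *						// from the release callback
 *	percpu_ref_reinit(&foo->ref);		// back in percpu mode, count == 1
 */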

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
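/*
 * A minimal sketch of calling percpu_ref_exit() from the release callback;
 * struct foo and foo_release() are hypothetical:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);	// frees the percpu counters
 *		kfree(foo);
 *	}
 */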

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
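	/*
	 * A worked example with illustrative numbers: suppose three refs are
	 * still held whose gets only ever hit the percpu counters, so the sum
	 * above is count == 3 (mod 2^32, even if individual counters wrapped).
	 * ref->count started out as PCPU_COUNT_BIAS + 1.  If two of those refs
	 * are put after the kill but before this callback runs, the puts go to
	 * the atomic counter and ref->count == PCPU_COUNT_BIAS - 1 - still
	 * positive only because of the bias.  The atomic_add() below then
	 * yields (PCPU_COUNT_BIAS - 1) + 3 - PCPU_COUNT_BIAS == 2: the initial
	 * ref plus the one remaining ref.
	 */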

	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail.  See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
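/*
 * A hedged sketch of using the confirmation callback; struct foo, its
 * kill_done completion and foo_confirm_kill() are hypothetical.  Once
 * @confirm_kill has run, @ref is seen as dead on all CPUs, so the caller
 * knows no new references can be acquired through the percpu fast path:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->kill_done);	// must not block here
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->kill_done);
 */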
187