xref: /openbmc/linux/lib/percpu_counter.c (revision 2a598d0b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

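/*
 * debugobjects fixup callback: if a counter is freed while still active,
 * destroy it and release the tracking object so execution can continue.
 */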
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

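/*
 * Set the counter to @amount: zero each CPU's local count and store
 * @amount in the global count, all under fbc->lock.
 */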
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * local_irq_save() is needed to make the function irq safe:
 * - The slow path would be ok on its own, as it is protected by an
 *   irq-safe spinlock.
 * - this_cpu_add() would be ok on its own, as it is irq-safe by definition.
 * But the decision between the slow path and the fast path, and the actual
 * update, must be atomic too. Otherwise a call in process context could
 * check the current values and decide that the fast path can be used. If
 * an interrupt then occurs before the this_cpu_add(), and the interrupt
 * updates this_cpu(*fbc->counters), the this_cpu_add() executed after the
 * interrupt completes can produce values larger than "batch", or even
 * overflow.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	unsigned long flags;

	local_irq_save(flags);
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock(&fbc->lock);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(percpu_counter_add_batch);

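/*
 * Illustrative sketch, not part of the original file: a caller that can
 * tolerate more drift may pass a larger batch to cut lock traffic. The
 * names ("net_events", "NET_EVENTS_BATCH") are hypothetical.
 */
#if 0	/* example only */
static struct percpu_counter net_events;
#define NET_EVENTS_BATCH	256

static void net_events_add(long delta)
{
	/* Stays on the lock-free fast path until |local count| >= batch. */
	percpu_counter_add_batch(&net_events, delta, NET_EVENTS_BATCH);
}
#endif
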
/*
 * For a percpu_counter with a big batch, the deviation of its count can
 * be big, and there may be a requirement to reduce that deviation, e.g.
 * when the counter's batch is decreased at runtime to get better accuracy,
 * which can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);

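/*
 * Illustrative sketch, not part of the original file: running the sync
 * on every CPU, as the comment above suggests. percpu_counter_sync()
 * only folds the calling CPU's local count, so each online CPU must run
 * it. The helper names are hypothetical.
 */
#if 0	/* example only */
static void __percpu_counter_sync_one(void *info)
{
	percpu_counter_sync(info);
}

static void percpu_counter_sync_all(struct percpu_counter *fbc)
{
	/* Run the fold on each online CPU and wait for completion. */
	on_each_cpu(__percpu_counter_sync_one, fbc, 1);
}
#endif
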
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 *
 * We use the CPU mask of (cpu_online_mask | cpu_dying_mask) to capture sums
 * from CPUs that are in the process of being taken offline. Dying CPUs have
 * been removed from the online mask, but may not have had the hotplug dead
 * notifier called to fold the percpu count back into the global counter sum.
 * By including dying CPUs in the iteration mask, we avoid this race condition
 * so __percpu_counter_sum() just does the right thing when CPUs are being
 * taken offline.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

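/*
 * Illustrative sketch, not part of the original file: the cheap
 * percpu_counter_read() returns only fbc->count and can be off by up to
 * roughly batch * num_online_cpus(), while percpu_counter_sum() pays for
 * the lock and the per-CPU walk to get the precise value. The function
 * name is hypothetical.
 */
#if 0	/* example only */
static void report_counter(struct percpu_counter *fbc)
{
	s64 approx = percpu_counter_read(fbc);	/* fast, fbc->count only */
	s64 exact = percpu_counter_sum(fbc);	/* slow, folds per-CPU deltas */

	pr_info("approx=%lld exact=%lld\n", approx, exact);
}
#endif
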
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

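/*
 * Illustrative lifecycle sketch, not part of the original file. Callers
 * normally use the percpu_counter_init() wrapper, which supplies the
 * lockdep class key for them. The names here are hypothetical.
 */
#if 0	/* example only */
static struct percpu_counter nr_widgets;

static int __init widgets_init(void)
{
	int err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);

	if (err)
		return err;

	percpu_counter_add(&nr_widgets, 1);
	pr_info("widgets: %lld\n", percpu_counter_sum(&nr_widgets));

	percpu_counter_destroy(&nr_widgets);
	return 0;
}
#endif
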
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

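/*
 * Default batch; raised above 32 on larger machines by
 * compute_batch_value() below.
 */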
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

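/*
 * Scale the global batch with the number of online CPUs:
 * max(32, 2 * num_online_cpus()). Runs as a CPU hotplug callback and is
 * also called directly when a CPU dies.
 */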
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

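/*
 * CPU hotplug "dead" callback: fold the dead CPU's local count of every
 * registered counter back into the corresponding global count.
 */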
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal, and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);

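/*
 * Illustrative sketch, not part of the original file: a reservation
 * check that only pays for the precise sum when the approximate count is
 * within batch * num_online_cpus() of the limit. Names are hypothetical.
 */
#if 0	/* example only */
static bool can_reserve(struct percpu_counter *free, s64 want)
{
	/* >= 0 means the counter is at least "want". */
	return __percpu_counter_compare(free, want,
					percpu_counter_batch) >= 0;
}
#endif
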
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);