/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

/*
 * Set the counter to @amount: zero every possible CPU's local delta and
 * store @amount in the central count, all under fbc->lock.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * Add @amount to this CPU's local delta.  If the delta reaches @batch in
 * either direction, fold it into the central fbc->count under fbc->lock
 * and reset the local delta to zero.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_write(*fbc->counters, 0);
		spin_unlock(&fbc->lock);
	} else {
		__this_cpu_write(*fbc->counters, count);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
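
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): callers normally use the percpu_counter_add() wrapper from
 * <linux/percpu_counter.h>, which passes percpu_counter_batch as @batch.
 * The names my_counter and my_account_event are hypothetical:
 *
 *	static struct percpu_counter my_counter;
 *
 *	static void my_account_event(s64 delta)
 *	{
 *		percpu_counter_add(&my_counter, delta);
 *	}
 *
 * The add stays off the shared lock until a CPU's local delta reaches the
 * batch size, at which point it is folded into the shared count.
 */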

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
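
/*
 * Editorial note (approximate reasoning, not from the original file): with
 * the batching in __percpu_counter_add(), each online CPU holds a local
 * delta of at most batch - 1 in either direction, so percpu_counter_read()
 * is expected to deviate from the precise value by no more than roughly
 * percpu_counter_batch * num_online_cpus().  percpu_counter_compare()
 * below relies on the same bound.  A sketch of the trade-off (my_counter
 * is hypothetical):
 *
 *	s64 approx = percpu_counter_read(&my_counter);	cheap, possibly stale
 *	s64 exact  = percpu_counter_sum(&my_counter);	walks online CPUs under fbc->lock
 */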

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
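
/*
 * Illustrative lifecycle sketch (editorial addition; my_counter is
 * hypothetical).  At this revision percpu_counter_init() is a wrapper in
 * <linux/percpu_counter.h> that supplies a static lock_class_key to
 * __percpu_counter_init():
 *
 *	static struct percpu_counter my_counter;
 *
 *	if (percpu_counter_init(&my_counter, 0))
 *		return -ENOMEM;
 *	percpu_counter_add(&my_counter, 1);
 *	...
 *	percpu_counter_destroy(&my_counter);
 */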

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

/*
 * Scale the default batch with the number of online CPUs, but never let it
 * drop below 32.
 */
static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}
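
/*
 * Worked example (editorial, illustrative numbers): with 2 online CPUs the
 * batch stays at max(32, 4) = 32; with 64 online CPUs it becomes
 * max(32, 128) = 128, so each CPU may accumulate up to 127 events locally
 * before taking the shared lock.
 */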

/*
 * CPU hotplug callback: recompute the batch for the new number of online
 * CPUs and, when a CPU goes away, fold its leftover deltas into each
 * registered counter's central count.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare the counter against the given value.
 * Return 1 if greater, 0 if equal, and -1 if less.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if the rough count will be sufficient for the comparison */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use the precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
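
/*
 * Illustrative usage sketch (editorial addition; my_counter and my_limit
 * are hypothetical): enforcing a limit without paying for a full sum on
 * every call, since the precise sum is only taken when the approximate
 * count is within one batch-per-CPU of the limit:
 *
 *	if (percpu_counter_compare(&my_counter, my_limit) >= 0)
 *		return -ENOSPC;
 *	percpu_counter_add(&my_counter, 1);
 */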

static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);