/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

/*
 * Set the counter to @amount, zeroing every CPU's local delta.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * Accumulate @amount in this CPU's local delta; only when the delta
 * reaches +/-batch is it folded into fbc->count under the lock, which
 * keeps the common case free of lock and cacheline contention.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
		*pcount = 0;
	}
	fbc->count = ret;
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

static struct lock_class_key percpu_counter_irqsafe;

int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(percpu_counter_init);

/*
 * Like percpu_counter_init(), but gives the lock its own lockdep class
 * for counters that are also manipulated from irq context.
 */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
{
	int err;

	err = percpu_counter_init(fbc, amount);
	if (!err)
		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
	return err;
}

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	free_percpu(fbc->counters);
	fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Fold a dead CPU's local delta back into fbc->count for every registered
 * counter, so no counts are lost across a hot-unplug.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
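/* Register the hotplug callback at boot. */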
module_init(percpu_counter_startup);
#endif
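
/*
 * Example usage (an illustrative sketch, not part of this file): a typical
 * caller pairs percpu_counter_init() with percpu_counter_destroy() and uses
 * the cheap wrappers from <linux/percpu_counter.h>: percpu_counter_add()
 * invokes __percpu_counter_add() with the default FBC_BATCH, and
 * percpu_counter_read() returns the approximate central count.  Callers
 * fall back to percpu_counter_sum() only when an exact value is needed.
 * The module below is hypothetical and exists purely to demonstrate the
 * API:
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/percpu_counter.h>
 *
 *	static struct percpu_counter example_events;	(hypothetical counter)
 *
 *	static int __init example_init(void)
 *	{
 *		int err = percpu_counter_init(&example_events, 0);
 *
 *		if (err)
 *			return err;
 *		percpu_counter_add(&example_events, 42);	(fast, batched path)
 *		printk(KERN_INFO "approx=%lld exact=%lld\n",
 *		       (long long)percpu_counter_read(&example_events),
 *		       (long long)percpu_counter_sum(&example_events));
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		percpu_counter_destroy(&example_events);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */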