// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with an exponentially decaying history to give smooth transitions. A
 * formula expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * Where x_{i,j} is j's number of events in the i-th last time period and
 * x_i is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining a
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator
 * of event j lazily when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 */
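/*
 * Example of the lazy aging (illustrative numbers): suppose the global
 * period advanced from 3 to 5 while type j saw no activity. When j's
 * proportion is next needed, its numerator is simply shifted right by
 * 5 - 3 = 2 periods, so n_j = 40 becomes 10, and j's remembered period
 * is set to 5. The denominator is aged eagerly in fprop_new_period().
 */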
#include <linux/flex_proportions.h>

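/*
 * Initialize global proportion bookkeeping. Returns 0 on success or the
 * error returned by the percpu counter allocation.
 */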
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events;
	unsigned long flags;

	local_irq_save(flags);
	events = percpu_counter_sum(&p->events);
	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1) {
		local_irq_restore(flags);
		return false;
	}
	write_seqcount_begin(&p->sequence);
	/* Aging by >= 64 periods zeroes everything (and the shift would be
	 * undefined), so in that case subtract the full sum below */
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	local_irq_restore(flags);

	return true;
}
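/*
 * A minimal usage sketch (the timer and names are illustrative, not part
 * of this file): age the proportions once per second until all events
 * have decayed away.
 *
 *	static void age_fn(struct timer_list *t)
 *	{
 *		if (fprop_new_period(&fg, 1))
 *			mod_timer(t, jiffies + HZ);
 *	}
 */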

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

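/*
 * Catch pl up with the current global period: age pl->events by one right
 * shift for every period that elapsed since pl was last updated.
 */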
static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in the presence of
	 * percpu counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
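/*
 * A minimal usage sketch (names are illustrative): count an event of one
 * type against the global total and read back its share.
 *
 *	__fprop_inc_single(&fg, &pl);
 *	fprop_fraction_single(&fg, &pl, &num, &den);
 *
 * pl's share of all recent events is then num/den, guaranteed <= 1.
 */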

/*
 * ---- PERCPU ----
 */
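/*
 * Batch for updates of the per-type percpu counter: grows with the log of
 * the number of possible CPUs, trading accuracy of the approximate counter
 * value (off by at most nr_cpu_ids * PROP_BATCH) for cheaper increments.
 */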
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

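/*
 * Percpu flavour of fprop_reflect_period_single(): age pl->events by the
 * elapsed periods. Instead of setting the counter (which could lose
 * concurrent increments), add the difference -val + (val >> shift); e.g.
 * for val == 64 and a shift of 2 we add -48, leaving 16 == 64 >> 2.
 */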
static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		/*
		 * The approximate value can be off by up to
		 * nr_cpu_ids * PROP_BATCH; take the exact sum when that
		 * error could dominate the value itself.
		 */
		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}

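/* Return fraction of events of type pl, like fprop_fraction_single() */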
void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in the presence of
	 * percpu counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
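/*
 * As with the single variant, the returned fraction can be used to split a
 * resource proportionally, e.g. (a sketch with illustrative names):
 *
 *	fprop_fraction_percpu(&fg, &pl, &num, &den);
 *	share = div64_u64((u64)budget * num, den);
 */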

/*
 * Like __fprop_inc_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_inc_percpu_max(struct fprop_global *p,
			    struct fprop_local_percpu *pl, int max_frac)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		if (numerator >
		    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
			return;
	} else
		fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}
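
/*
 * A minimal sketch (illustrative values): cap one type at 1/4 of all
 * events.
 *
 *	__fprop_inc_percpu_max(&fg, &pl, FPROP_FRAC_BASE / 4);
 *
 * The event is left out of the statistics once pl already owns more than
 * a quarter of the decayed event count.
 */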
274