// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is the number of events of type j in the i-th last time
 * period and x_i is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iteration over all event types, we instead shift the numerator of
 * event j lazily when someone asks for the proportion of event j or when
 * event j occurs. This can be implemented trivially by remembering, for each
 * type j, the last period in which its proportion was updated.
 */
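/*
 * A worked example of the lazy shift (illustrative numbers only): suppose
 * n_j = 8 and d = 32 at the end of period 5, so p_j = 8/32 = 1/4. If the
 * next event of type j arrives in period 8, we first age the stale
 * numerator by the 3 elapsed periods (n_j >>= 3, giving n_j = 1) and only
 * then add the new event; the result is the same as if n_j had been halved
 * at every period boundary.
 */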
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}
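
/*
 * Minimal lifecycle sketch (illustrative only; the function name is
 * hypothetical): every successful fprop_global_init() must be paired with
 * an fprop_global_destroy() once the structure is no longer used.
 */
static int __maybe_unused fprop_example_lifecycle(void)
{
	struct fprop_global fg;
	int err;

	err = fprop_global_init(&fg, GFP_KERNEL);
	if (err)
		return err;
	/* ... count events, declare periods, query fractions ... */
	fprop_global_destroy(&fg);
	return 0;
}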

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);

	return true;
}
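
/*
 * Sketch of a serialized aging caller (illustrative; the function name is
 * hypothetical): declare the number of periods that elapsed since the last
 * call and report whether further aging would still change anything.
 */
static bool __maybe_unused fprop_example_age(struct fprop_global *p,
					     int elapsed_periods)
{
	if (elapsed_periods <= 0)
		return true;
	/* false means aging zeroed all events, so rearming is pointless */
	return fprop_new_period(p, elapsed_periods);
}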

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
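
/*
 * Illustrative sketch (hypothetical helper): distribute a resource 'total'
 * according to the fraction of events of type @pl. Because
 * fprop_fraction_single() guarantees den > 0 and num <= den, the result
 * never exceeds 'total'.
 */
static unsigned long __maybe_unused
fprop_example_share_single(struct fprop_global *p,
			   struct fprop_local_single *pl, unsigned long total)
{
	unsigned long num, den;

	fprop_fraction_single(p, pl, &num, &den);
	return div64_u64((u64)total * num, den);
}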

/*
 * ---- PERCPU ----
 */
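/*
 * Batch for the percpu counter of a single event type; grows with the
 * number of possible CPUs so that per-CPU deltas are folded into the
 * global count less often on large machines.
 */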
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* @nr events of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
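
/*
 * End-to-end sketch of the percpu variant (illustrative; names are
 * hypothetical): count @nr events for one type and read back its share of
 * the total, scaled to parts per FPROP_FRAC_BASE.
 */
static unsigned long __maybe_unused
fprop_example_percpu(struct fprop_global *fg, struct fprop_local_percpu *pl,
		     long nr)
{
	unsigned long num, den;

	__fprop_add_percpu(fg, pl, nr);
	fprop_fraction_percpu(fg, pl, &num, &den);
	/* num <= den and den > 0 are guaranteed, so this is at most BASE */
	return div64_u64((u64)num << FPROP_FRAC_SHIFT, den);
}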

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
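
/*
 * Usage sketch (illustrative; the function name is hypothetical): cap one
 * event type at a quarter of all events, so its contribution stops growing
 * once num/den reaches 1/4.
 */
static void __maybe_unused
fprop_example_capped_add(struct fprop_global *fg,
			 struct fprop_local_percpu *pl, long nr)
{
	__fprop_add_percpu_max(fg, pl, FPROP_FRAC_BASE / 4, nr);
}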
277