/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the multiplication
 * in a scaling operation may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
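 *
 * (Concretely: six ranges of expected duration, from below 10 usec up to
 * over 100 msec, times the two IO states, give the 12 buckets that
 * BUCKETS above provides; see which_bucket() below.)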
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals, and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
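 *
 * For example, a mouse reporting at 125 Hz wakes us roughly every
 * 8000 usec; after 8 such intervals the standard deviation is close to
 * zero, so we predict 8000 usec of idle even when the next timer event
 * is much further away.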
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, reduced performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each unit of the per-cpu load average,
 * and a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined and match the arithmetic
 * in performance_multiplier() below)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
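 *
 * As a worked example: with a load average of 1.00 and one task waiting
 * for IO, performance_multiplier() below returns 1 + 20 + 10 = 31, so a
 * predicted idle period of 310 usec tolerates at most 10 usec of exit
 * latency.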
 *
 */

struct menu_device {
	int		last_state_idx;
	int             needs_update;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

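/*
 * Convert the fixed-point load average into "points": e.g. a load of
 * 1.50 yields 1 * 10 + 50 / 10 = 15, i.e. roughly ten points per unit
 * of load.
 */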
static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with
	 * IO pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
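
/*
 * For example, an expected duration of 250 usec with IO outstanding
 * selects bucket 6 + 2 = 8, while the same duration with no IO pending
 * selects bucket 2.
 */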

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowaiters;

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
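
/*
 * Worked example: if all eight recorded intervals sit within a few usec
 * of 4000, the variance is well under 400 usec^2 and we return ~4000.
 * If seven samples sit near 100 usec and one outlier is 10000 usec, the
 * first pass fails both tests; the outlier is then excluded via thresh
 * and the second pass (divisor == 7, still at least 3/4 of INTERVALS)
 * returns roughly 100.
 */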

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
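	/*
	 * The correction factor is a ratio scaled by RESOLUTION * DECAY
	 * (= 8192), so a factor of 8192 predicts the full timer distance.
	 * E.g. with next_timer_us = 1000 and a factor of 4096 (a history
	 * of waking at ~50% of the timer), this computes
	 * 1000 * 4096 / 8192 = 500 usec.
	 */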
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	if (CPUIDLE_DRIVER_STATE_START > 0) {
		struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
			data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
		else
			data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
	} else {
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	/*
	 * Use the performance multiplier and the user-configurable
	 * latency_req to determine the maximum exit latency.
	 */
	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;

		data->last_state_idx = i;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured time anyway if it is short, and if it is
	 * long, we truncate it to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	/* measured value */
	measured_us = cpuidle_get_last_residency(dev);

	/* Deduct exit latency */
	if (measured_us > 2 * target->exit_latency)
		measured_us -= target->exit_latency;
	else
		measured_us /= 2;

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
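
	/*
	 * E.g. starting from the unity factor of 8192 (RESOLUTION * DECAY):
	 * if we slept only 250 of a predicted 1000 usec, the update is
	 * 8192 - 8192/8 + 1024 * 250 / 1000 = 7168 + 256 = 7424; repeated
	 * updates decay the factor toward 8192 * 0.25 = 2048.
	 */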

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);
497