xref: /openbmc/linux/drivers/cpuidle/governors/menu.c (revision cfdfc14e)
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licenced under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
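
/*
 * Worked check (illustrative): with the defaults above,
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976,
 * which is far below UINT_MAX (4,294,967,295), so plain 32-bit
 * arithmetic is safe without the ULL suffix.
 */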


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per cpu load average" we have;
 * a value of 10 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu-local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
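
/*
 * Example (illustrative): a fixed-point load of 2.50, i.e. 2.5 * FIXED_1,
 * gives LOAD_INT = 2 and LOAD_FRAC = 50, so get_loadavg() returns
 * 2 * 10 + 50 / 10 = 25, i.e. ten points per unit of load average.
 */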

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
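
/*
 * Example (illustrative): an expected duration of 150 us with IO
 * outstanding lands in bucket BUCKETS/2 + 2 = 8, while the same
 * duration with no IO pending lands in bucket 2.
 */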

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowaiters;

	return mult;
}
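
/*
 * Example (illustrative): a per-CPU load average of 1.00 makes
 * get_loadavg() return 10 and two iowait tasks add 10 * 2 = 20, so
 * mult = 1 + 2 * 10 + 20 = 41. A predicted idle period of 410 us then
 * only admits C states with exit latencies of up to 410 / 41 = 10 us.
 */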

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
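
/*
 * Example (illustrative): intervals of {98, 101, 99, 100, 102, 97, 100,
 * 103} us sum to 800, so avg = 800 >> 3 = 100 us; the squared deviations
 * sum to 28, so variance = 28 >> 3 = 3 us^2, well under the 400 us^2
 * threshold, and 100 us is returned as the typical interval.
 */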

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
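
	/*
	 * Example (illustrative): next_timer_us = 1000 with a stored
	 * correction factor of 4096 (0.5 in RESOLUTION * DECAY = 8192
	 * fixed point) yields predicted_us = 1000 * 4096 / 8192 = 500.
	 */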

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}
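
	/*
	 * Example (illustrative): if C1 has a target residency of 2 us,
	 * the polling threshold is max(20, 2) = 20 us, so the polling
	 * state stays the default only when the next timer is due within
	 * 20 us.
	 */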

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}
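
	/*
	 * Example (illustrative, assuming HZ = 250 so TICK_USEC = 4000):
	 * with the tick already stopped, a prediction of 1000 us and a next
	 * timer 2500 us away is raised to min(4000, 2500) = 2500 us.
	 */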

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}
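
	/*
	 * Example (illustrative): with an exit latency of 100 us, a measured
	 * residency of 300 us becomes 300 - 100 = 200 us, while a measured
	 * 150 us (less than twice the exit latency) is halved to 75 us.
	 */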

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
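
	/*
	 * Example (illustrative): a unity factor of 8192 decays to
	 * 8192 - 8192 / 8 = 7168; a measured/next ratio of 0.5 then adds
	 * 1024 / 2 = 512, giving 7680, i.e. about 0.94 of unity.
	 */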

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
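	/* RESOLUTION * DECAY = 8192 here, i.e. a unity ratio in fixed point. */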
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);