/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
#include <linux/cpu.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * the scaling multiplication may overflow on 32-bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64-bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

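/*
 * Correction factors are stored as fixed-point values scaled by
 * RESOLUTION * DECAY, so a stored value of 1024 * 8 = 8192 means a factor
 * of 1.0 (see menu_enable_device()).  As an illustrative example, a stored
 * value of 4096 corresponds to a factor of 0.5 and scales a 1000 us
 * next-timer estimate down to a 500 us prediction.
 */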

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us with this duration in the "target_residency" field. So all that
 * we need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts, for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration was always 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it keeps
 * a set of factors, not just a single one. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed-transfer-rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals, and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real,
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule of thumb is implemented using a performance multiplier:
 * if the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to too high a performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per CPU load average" we have,
 * and a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined; see performance_multiplier())
 *
 * The load average factor gives a longer-term (a few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

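/*
 * Convert a fixed-point load average (FSHIFT fractional bits) into the
 * integer "points" used by performance_multiplier(): roughly ten points per
 * unit of load plus one point per tenth of the fractional part.
 */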
static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
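/*
 * Illustrative numbers: with a load average of 1.00 and two tasks in iowait
 * on this CPU, mult = 1 + 2 * 10 + 10 * 2 = 41, so a C state with a 100 us
 * exit latency is only picked once the predicted idle time reaches roughly
 * 41 * 100 us = 4100 us (assuming no tighter PM QoS limit).
 */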
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowaiters;

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
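/*
 * Illustrative example: the eight intervals 500, 510, 490, 505, 495, 500,
 * 498 and 502 us average to 500 us with a variance of about 32 us^2, which
 * is well below the 400 us^2 threshold, so 500 us is returned as the
 * predicted idle duration.
 */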
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	struct device *device = get_cpu_device(dev->cpu);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	int resume_latency = dev_pm_qos_raw_read_value(device);
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	if (resume_latency < latency_req &&
	    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

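	/*
	 * Pick the correction-factor bucket from the distance to the next
	 * timer and the number of tasks waiting for IO on this CPU.
	 */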
	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

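	/*
	 * From here on, expected_interval tracks the idle duration that the
	 * tick-retention check below compares against the tick period.
	 */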
	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the actually entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured time anyway if it is short, and if it is
	 * long, truncate it to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

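	/*
	 * Together with the decay above, this forms a running average:
	 * (DECAY - 1)/DECAY of the old factor is kept and the newest
	 * measured/next_timer ratio is added, scaled by RESOLUTION.
	 */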
	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first-time init or CPU hotplug
	 * etc.), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);