/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, which
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per cpu load average" we have,
 * and a value of 10 is added for each process that is waiting for IO on
 * this CPU (see performance_multiplier() below).
 * (these weights are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
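
/*
 * Illustrative layout of that factor array (derived from which_bucket()
 * below, not stated elsewhere in this file): buckets 0-5 hold the
 * correction factors used with no IO pending, for expected durations of
 * <10 us, <100 us, <1 ms, <10 ms, <100 ms and above; buckets 6-11 hold
 * the same duration ranges with IO pending.
 */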

struct menu_device {
	int		last_state_idx;
	int             needs_update;
	int             tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
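
/*
 * Quick sanity check of the fixed-point math above (illustrative numbers):
 * a load average of 1.50 is stored as 1.5 * FIXED_1, so LOAD_INT() yields
 * 1 and LOAD_FRAC() yields 50, and get_loadavg() returns
 * 1 * 10 + 50 / 10 = 15, i.e. tenths of a load-average point.
 */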

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with no
	 * IO pending, one with.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
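
/*
 * Example (illustrative): an expected duration of 250 us maps to
 * bucket 2 with no IO pending, and to bucket BUCKETS/2 + 2 = 8 when
 * at least one task is waiting for IO.
 */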

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg(load);

	/* for each task waiting for IO on this CPU, add 10 */
	mult += 10 * nr_iowaiters;

	return mult;
}
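
/*
 * Worked example (illustrative): with a per-CPU load average of 1.00
 * (get_loadavg() == 10) and two iowaiters, the multiplier is
 * 1 + 2 * 10 + 10 * 2 = 41; so, tick running and PM QoS permitting,
 * menu_select() only considers states whose exit latency is at most
 * 1/41st of the predicted idle time.
 */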

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
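
/*
 * Example (illustrative): given seven samples of 100 us and one 5000 us
 * outlier, the first pass fails both variance tests, so the outlier is
 * excluded via 'thresh' (7 of 8 samples remain, so the bottom-3/4
 * cutoff is not hit); the second pass then finds avg = 100 and
 * variance = 0, which passes "variance <= 400", and 100 is returned.
 */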

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
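
	/*
	 * Example (illustrative): with next_timer_us = 1000 and a learned
	 * correction factor of 4096 (0.5 in RESOLUTION * DECAY fixed
	 * point), this yields DIV_ROUND_CLOSEST_ULL(1000 * 4096, 8192),
	 * i.e. a predicted idle time of 500 us.
	 */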

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * Default to a physical idle state, not to busy polling, unless
		 * a timer is going to trigger really really soon.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = ktime_to_us(delta_next);
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us) {
			if (data->predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				expected_interval = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			goto out;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

out:
	data->last_state_idx = idx;

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we still use the measured value if it is short; if it is long,
	 * we truncate it to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}
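
	/*
	 * Example (illustrative): with exit_latency = 10 us, a measured
	 * residency of 100 us becomes 90 us, while a measured 15 us is
	 * simply halved to 7 us, since the state may never have been
	 * fully entered.
	 */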

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
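
	/*
	 * Example (illustrative): with DECAY = 8, an old factor of 8192
	 * (unity, RESOLUTION * DECAY), next_timer_us = 1000 and
	 * measured_us = 500, the new factor is
	 * 8192 - 1024 + 1024 * 500 / 1000 = 7680, i.e. the running
	 * average moves 1/8 of the way towards the observed 0.5 ratio.
	 */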

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);