xref: /openbmc/linux/drivers/cpuidle/governors/menu.c (revision 7d545e77)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

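/*
 * Sanity check on the defaults (illustrative arithmetic only):
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51198976,
 * which is well below UINT_MAX (4294967295), so the 32 bit scaling
 * math described above cannot overflow with these values.
 */
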
/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * A single factor is used in determining this multiplier: a value of 10
 * is added for each process that is waiting for IO on this CPU (see
 * performance_multiplier() below; the value was determined
 * experimentally). The iowait count gives a cpu local, instantaneous
 * input to the decision.
 */

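/*
 * Worked example of the prediction scaling (hypothetical numbers, not
 * taken from a real trace): the correction factor is stored scaled by
 * RESOLUTION * DECAY = 8192, so a factor that has converged to 4096
 * means "we historically slept for about half of the predicted time".
 * With the next timer 10000 us away, menu_select() computes
 *
 *     predicted_us = 10000 * 4096 / 8192 = 5000 us
 *
 * and uses that (or the typical-interval estimate, if lower) when
 * weighing states' target_residency values.
 */
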
struct menu_device {
	int		last_state_idx;	/* index of the state entered last */
	int		needs_update;	/* stats refresh pending in menu_select() */
	int		tick_wakeup;	/* last wakeup came from the tick */

	unsigned int	next_timer_us;	/* time till the next timer event */
	unsigned int	bucket;		/* correction-factor bucket in use */
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];	/* last 8 measured idle intervals */
	int		interval_ptr;	/* ring-buffer position in intervals[] */
};

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

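/*
 * Illustrative mapping (assumed values): an expected 500 us sleep with
 * no IO waiters lands in bucket 2 (100 <= 500 < 1000); the same
 * duration with IO pending lands in bucket BUCKETS/2 + 2 = 8, so the
 * two situations accumulate separate correction-factor history.
 */
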
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}

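/*
 * Example (hypothetical numbers): with two tasks in iowait on this CPU
 * the multiplier is 1 + 10 * 2 = 21, so a predicted idle time of
 * 2100 us caps the acceptable exit latency at 2100 / 21 = 100 us in
 * menu_select(), keeping deep states off the table while IO is hot.
 */
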
static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data,
					 unsigned int predicted_us)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = INT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	/*
	 * If the result of the computation is going to be discarded anyway,
	 * avoid the computation altogether.
	 */
	if (min >= predicted_us)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

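/*
 * Example (hypothetical trace): eight recorded intervals of
 * { 998, 1001, 1000, 999, 1002, 1000, 997, 1003 } us average to
 * 1000 us with a variance of about 3 us^2; avg^2 (10^6) dwarfs
 * 36 * variance, so the function returns 1000 us (assuming the
 * timer-based prediction wasn't already lower).
 */
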
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int idx;
	unsigned int interactivity_req;
	unsigned int predicted_us;
	unsigned long nr_iowaiters;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	nr_iowaiters = nr_iowait_cpu(dev->cpu);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_us < drv->states[1].target_residency ||
	      latency_req < drv->states[1].exit_latency) &&
	     !drv->states[0].disabled && !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running.
		 */
		*stop_tick = false;
		return 0;
	}

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_us < TICK_USEC)
			predicted_us = ktime_to_us(delta_next);
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency > predicted_us) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency <= latency_req &&
			    s->target_residency <= data->next_timer_us) {
				predicted_us = s->target_residency;
				idx = i;
				break;
			}
			if (predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_us = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			return idx;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			predicted_us = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	return idx;
}

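/*
 * Selection walk-through (hypothetical driver: non-polling states with
 * target residencies { 0, 20, 200, 2000 } us, all enabled and within
 * latency_req, tick not yet stopped): with predicted_us = 150, the loop
 * advances idx through states 0 and 1, then hits the 200 us residency
 * test; since 150 us is below the tick period, it breaks out with the
 * 20 us state still selected and the tick is left running.
 */
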
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured value anyway if it is short, and truncate
	 * it to the whole expected time if it is long.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate.  If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_us = data->next_timer_us;
	} else {
		/* measured value */
		measured_us = dev->last_residency;

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

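/*
 * Numerical sketch of the decaying-average update above (assumed
 * inputs): with an old factor of 8192, measured_us = 5000 and
 * next_timer_us = 10000, the new factor is
 *
 *     8192 - 8192/8 + 1024 * 5000 / 10000 = 8192 - 1024 + 512 = 7680
 *
 * Repeated 50% wakeups converge on the fixed point f - f/8 + 512 = f,
 * i.e. f = 4096, which is exactly the "0.5 ratio" factor used in the
 * worked example near the top of this file.
 */
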
/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

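/*
 * RESOLUTION * DECAY = 8192 is "unity" in the fixed-point encoding, so
 * a freshly enabled CPU initially predicts exactly next_timer_us until
 * real wakeup history starts to bend the per-bucket factors.
 */
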
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);