/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
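 * For example, a USB mouse reporting at a typical 125 Hz polling rate
 * wakes the CPU every 8 ms while it is moving; the next timer event may
 * be far away, yet the observed idle intervals cluster tightly around
 * 8 ms, which the detector described below picks up almost exactly.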
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * One input is used in determining this multiplier:
 * a value of 10 is added for each process that is waiting for IO on
 * this CPU.
 * (this value is experimentally determined)
 *
 * The iowait value gives a cpu-local, instantaneous input to the decision.
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
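 *
 * For illustration, suppose the eight recorded intervals (in us) are
 * { 1995, 2002, 1998, 2001, 1997, 2000, 2003, 1996 }: the average is
 * 1999 us and the variance works out to 7 us^2 (stddev < 3 us), so
 * 1999 us is used as the prediction regardless of what the next timer
 * event says.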
 */
static unsigned int get_typical_interval(struct menu_device *data,
					 unsigned int predicted_us)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	/*
	 * If the result of the computation is going to be discarded anyway,
	 * avoid the computation altogether.
	 */
	if (min >= predicted_us)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
		    || variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
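	 *
	 * For example, with samples { 300, 300, 300, 300, 300, 300, 300,
	 * 10000 } (in us), the first pass fails both tests above because
	 * the variance is dominated by the 10000 us outlier. Dropping
	 * everything above max - 1 = 9999 us leaves seven samples
	 * (divisor * 4 = 28 >= 24, so the retry is still allowed), for
	 * which avg = 300 us and variance = 0, and 300 us is returned.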
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int idx;
	unsigned int interactivity_req;
	unsigned int predicted_us;
	unsigned long nr_iowaiters;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	nr_iowaiters = nr_iowait_cpu(dev->cpu);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_us < drv->states[1].target_residency ||
	      latency_req < drv->states[1].exit_latency) &&
	     !drv->states[0].disabled && !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running.
		 */
		*stop_tick = false;
		return 0;
	}

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					     data->correction_factor[data->bucket],
					     RESOLUTION * DECAY);
	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_us < TICK_USEC)
			predicted_us = ktime_to_us(delta_next);
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency > predicted_us) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
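			 *
			 * For example, if the current pick is a polling
			 * state, predicted_us is 10 us, and this state
			 * needs 20 us of residency with an exit latency
			 * within latency_req, then a timer firing in 50 us
			 * guarantees the residency will be met, so entering
			 * this state beats burning power in the poll loop.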
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency <= latency_req &&
			    s->target_residency <= data->next_timer_us) {
				predicted_us = s->target_residency;
				idx = i;
				break;
			}
			if (predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_us = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			return idx;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			predicted_us = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
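	 *
	 * (As a worked example of the exit latency rule described below:
	 * with a 10 us exit latency, a measured residency of 120 us is
	 * credited as 110 us of idle time, while a measured 15 us, being
	 * no more than twice the exit latency, is halved to 7 us.)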
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of
		 * that state was inaccurate. If a better prediction had been
		 * made, the CPU might have been woken up from idle by the
		 * next timer. Assume that to be the case.
		 */
		measured_us = data->next_timer_us;
	} else {
		/* measured value */
		measured_us = dev->last_residency;

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
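	 *
	 * (RESOLUTION * DECAY = 8192 acts as "unity" here: menu_select
	 * computes next_timer_us * correction_factor / (RESOLUTION * DECAY),
	 * so a factor of 8192 leaves the timer-based estimate unscaled.)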
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);
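
/*
 * Note: cpuidle switches to a newly registered governor when its rating
 * exceeds that of the current one, so menu (rating 20) is preferred over
 * the ladder governor (rating 10) by default.
 */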