/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimate is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, lost performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * roughly 20 points are added for each unit of "per cpu load average"
 * we have, and 10 points are added for each process that is waiting
 * for IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
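
/*
 * A worked example of the prediction math above, with purely
 * illustrative numbers: assume the next timer fires in 10 ms
 * (next_timer_us = 10000) and no IO is pending, which lands in
 * bucket 4. If sleeps in this bucket historically lasted about half
 * the timer distance, the running average settles near
 * correction_factor[4] ~= RESOLUTION * DECAY / 2 = 4096, so the
 * prediction becomes 10000 * 4096 / (RESOLUTION * DECAY) = 5000 us.
 * Only C states with target_residency <= 5000 us then break even.
 */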

struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    next_timer_us;
        unsigned int    predicted_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
        unsigned long this = this_cpu_load();

        return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with IO pending, one
         * without.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowait_cpu(smp_processor_id()))
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */
        mult += 2 * get_loadavg();

        /* for IO wait tasks (per cpu!) we add 10x each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());

        return mult;
}
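
/*
 * Illustrative example (hypothetical numbers): with a per-cpu load
 * average of 1.00, get_loadavg() returns about 10, and with two tasks
 * in iowait on this CPU the multiplier becomes
 * 1 + 2 * 10 + 10 * 2 = 41. A C state with a 10 us exit latency is
 * then only a candidate once the predicted idle duration reaches
 * 10 * 41 = 410 us (see the latency clamp in menu_select() below).
 */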

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
        return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
        int i, divisor;
        unsigned int max, thresh;
        uint64_t avg, stddev;

        thresh = UINT_MAX; /* Discard outliers above this value */

again:

        /* First calculate the average of past intervals */
        max = 0;
        avg = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        avg += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        do_div(avg, divisor);

        /* Then try to determine standard deviation */
        stddev = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = value - avg;
                        stddev += diff * diff;
                }
        }
        do_div(stddev, divisor);
        /*
         * The typical interval is obtained when the standard deviation is
         * small, or when the standard deviation is small compared to the
         * average interval.
         *
         * int_sqrt() formal parameter type is unsigned long. When the
         * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
         * the resulting squared standard deviation exceeds the input domain
         * of int_sqrt on platforms where unsigned long is 32 bits in size.
         * In such a case, reject the candidate average.
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(stddev <= ULONG_MAX)) {
                stddev = int_sqrt(stddev);
                if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
                        if (data->next_timer_us > avg)
                                data->predicted_us = avg;
                        return;
                }
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return;

        thresh = max - 1;
        goto again;
}
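
/*
 * Illustrative example (hypothetical numbers): say the last 8 intervals
 * are seven samples near 2000 us plus one 150000 us outlier. The first
 * pass computes a huge standard deviation, so no match is declared; the
 * outlier is then excluded (thresh = 149999) and the loop runs again
 * over the remaining 7 samples: avg ~= 2000 us and stddev is small, so
 * avg > 6 * stddev and divisor * 4 = 28 >= INTERVALS * 3 = 24 both
 * hold, and 2000 us becomes the prediction (provided no timer fires
 * sooner).
 */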

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        unsigned int interactivity_req;
        struct timespec t;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        data->last_state_idx = 0;

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
        data->next_timer_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

        data->bucket = which_bucket(data->next_timer_us);

        /*
         * if the correction factor is 0 (e.g. first time init or cpu
         * hotplug etc), we actually want to start out with a unity factor.
         */
        if (data->correction_factor[data->bucket] == 0)
                data->correction_factor[data->bucket] = RESOLUTION * DECAY;

        /*
         * Force the result of multiplication to be 64 bits even if both
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
        data->predicted_us = div_round64((uint64_t)data->next_timer_us *
                                         data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);

        get_typical_interval(data);

        /*
         * Performance multiplier defines a minimum predicted idle
         * duration / latency ratio. Adjust the latency limit if
         * necessary.
         */
        interactivity_req = data->predicted_us / performance_multiplier();
        if (latency_req > interactivity_req)
                latency_req = interactivity_req;

        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
        if (data->next_timer_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
            dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;

                data->last_state_idx = i;
        }

        return data->last_state_idx;
}
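
/*
 * Illustrative walk through the selection above (hypothetical numbers):
 * with predicted_us = 5000 and performance_multiplier() = 41,
 * interactivity_req = 5000 / 41 = 121 us. A PM QoS limit of 200 us is
 * therefore tightened to 121 us, and a state with a target_residency
 * of 4000 us but an exit latency of 150 us is skipped even though its
 * residency requirement is met; a shallower state wins instead.
 */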

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);

        data->last_state_idx = index;
        if (index >= 0)
                data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we are basically lost in the dark about how much time passed.
         * As a compromise, assume we slept for the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */
        if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
                /* Use timer value as is */
                measured_us = data->next_timer_us;
        } else {
                /* Use measured value */
                measured_us = cpuidle_get_last_residency(dev);

                /* Deduct exit latency */
                if (measured_us > target->exit_latency)
                        measured_us -= target->exit_latency;

                /* Make sure our coefficients do not exceed unity */
                if (measured_us > data->next_timer_us)
                        measured_us = data->next_timer_us;
        }

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;

        if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_us values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
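
/*
 * Illustrative example of the running average above (hypothetical
 * numbers): starting from the unity factor 8192 (RESOLUTION * DECAY),
 * a sleep that lasts half the expected time (measured_us = 5000,
 * next_timer_us = 10000) yields
 * new_factor = 8192 - 8192 / 8 + 1024 * 5000 / 10000 = 7680.
 * Repeated half-length sleeps converge on the fixed point 4096,
 * i.e. a correction factor of 0.5, as described at the top of this
 * file.
 */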

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

        memset(data, 0, sizeof(struct menu_device));

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);