/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimate is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * points are added in proportion to the "per cpu load average", and
 * 10 points are added for each process that is waiting for IO on this
 * CPU (see performance_multiplier() below).
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
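/*
 * Worked example of the correction factor (illustrative numbers only):
 * factors are stored scaled by RESOLUTION * DECAY (8192), so a stored
 * value of 8192 means "trust the next timer fully" and 4096 means "we
 * historically slept for about half of the predicted time". With a
 * next timer event 1000 us away and a stored factor of 4096:
 *
 *     predicted_us = (1000 * 4096) / (RESOLUTION * DECAY) = 500 us
 *
 * which is the value menu_select() compares against each state's
 * target_residency.
 */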
struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    next_timer_us;
        unsigned int    predicted_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
        return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with no
         * IO pending, one with IO pending.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowaiters)
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */
        mult += 2 * get_loadavg(load);

        /* for each task waiting for IO on this CPU we add 10 */
        mult += 10 * nr_iowaiters;

        return mult;
}
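/*
 * Worked example (illustrative numbers only): with a load average of
 * 2.50, get_loadavg() returns 2 * 10 + 50 / 10 = 25. With one task
 * additionally waiting for IO on this CPU:
 *
 *     mult = 1 + 2 * 25 + 10 * 1 = 61
 *
 * menu_select() divides the predicted idle duration by this value, so
 * a 6100 us prediction would only admit C states with an exit latency
 * of 100 us or less (assuming the PM QoS limit is not lower still).
 */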
static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
        return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is, then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
        int i, divisor;
        unsigned int max, thresh;
        uint64_t avg, stddev;

        thresh = UINT_MAX; /* Discard outliers above this value */

again:

        /* First calculate the average of past intervals */
        max = 0;
        avg = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        avg += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        if (divisor == INTERVALS)
                avg >>= INTERVAL_SHIFT;
        else
                do_div(avg, divisor);

        /* Then try to determine standard deviation */
        stddev = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = value - avg;
                        stddev += diff * diff;
                }
        }
        if (divisor == INTERVALS)
                stddev >>= INTERVAL_SHIFT;
        else
                do_div(stddev, divisor);

        /*
         * The typical interval is obtained when the standard deviation is
         * small, either in absolute terms or compared to the average
         * interval.
         *
         * int_sqrt() formal parameter type is unsigned long. When the
         * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
         * the resulting squared standard deviation exceeds the input domain
         * of int_sqrt on platforms where unsigned long is 32 bits in size.
         * In that case reject the candidate average.
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(stddev <= ULONG_MAX)) {
                stddev = int_sqrt(stddev);
                if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
                        if (data->next_timer_us > avg)
                                data->predicted_us = avg;
                        return;
                }
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return;

        thresh = max - 1;
        goto again;
}
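/*
 * Worked example (illustrative numbers only): with the last 8 measured
 * intervals being {498, 502, 500, 497, 503, 499, 501, 500} us, the sum
 * is 4000, so the average is 4000 >> INTERVAL_SHIFT = 500 us; the sum
 * of squared differences is 28, giving a variance of 28 >> 3 = 3 and
 * int_sqrt(3) = 1 us of standard deviation. Since 500 > 6 * 1 and all
 * 8 samples were used, the 500 us average becomes the prediction,
 * provided no timer is due to fire sooner.
 */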
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        unsigned int interactivity_req;
        unsigned long nr_iowaiters, cpu_load;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

        get_iowait_load(&nr_iowaiters, &cpu_load);
        data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

        /*
         * Force the result of multiplication to be 64 bits even if both
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
        data->predicted_us = div_round64((uint64_t)data->next_timer_us *
                                         data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);

        get_typical_interval(data);

        /*
         * Performance multiplier defines a minimum predicted idle
         * duration / latency ratio. Adjust the latency limit if
         * necessary.
         */
        interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
        if (latency_req > interactivity_req)
                latency_req = interactivity_req;

        /*
         * We want to default to C1 (hlt), not to busy polling,
         * unless the timer is happening really, really soon.
         */
        if (data->next_timer_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
            dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;

                data->last_state_idx = i;
        }

        return data->last_state_idx;
}
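/*
 * Worked example of the selection loop (hypothetical state table, not
 * taken from any real driver): with predicted_us = 250 and an effective
 * latency_req of 30 us, a state with target_residency = 80 us and
 * exit_latency = 10 us is chosen over a shallower one, while a deeper
 * state with target_residency = 300 us is rejected because the
 * predicted idle period is too short to amortize its entry cost.
 */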
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);

        data->last_state_idx = index;
        if (index >= 0)
                data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we are basically lost in the dark about how much time passed.
         * As a compromise, assume we slept for the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */
        if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
                /* Use timer value as is */
                measured_us = data->next_timer_us;
        } else {
                /* Use measured value */
                measured_us = cpuidle_get_last_residency(dev);

                /* Deduct exit latency */
                if (measured_us > target->exit_latency)
                        measured_us -= target->exit_latency;

                /* Make sure our coefficients do not exceed unity */
                if (measured_us > data->next_timer_us)
                        measured_us = data->next_timer_us;
        }

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;

        if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_us values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
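/*
 * Worked example of the update step (illustrative numbers only):
 * starting from the unity factor 8192 (RESOLUTION * DECAY), a wakeup
 * that consistently arrives after half of the predicted interval
 * contributes RESOLUTION * measured_us / next_timer_us = 512 per
 * update, so:
 *
 *     8192 - 8192/8 + 512 = 7680, then 7680 - 960 + 512 = 7232, ...
 *
 * converging on 4096, at which point predictions settle at half of the
 * next timer distance, matching the observed behavior.
 */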
/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        int i;

        memset(data, 0, sizeof(struct menu_device));

        /*
         * if the correction factor is 0 (eg first time init or cpu hotplug
         * etc), we actually want to start out with a unity factor.
         */
        for (i = 0; i < BUCKETS; i++)
                data->correction_factor[i] = RESOLUTION * DECAY;

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);