/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>

#define BUCKETS 12
#define RESOLUTION 1024
#define DECAY 4
#define MAX_INTERESTING 50000

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, which
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection because its performance impact would be too high. So the
 * higher this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have.
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu-local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	measured_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
};
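
/*
 * Illustrative note on the bookkeeping above (the numbers below follow
 * from RESOLUTION = 1024 and DECAY = 4): each correction_factor[] entry
 * holds the observed measured/expected ratio in fixed point, scaled by
 * RESOLUTION * DECAY = 4096.  menu_select() computes
 *
 *	predicted_us = expected_us * correction_factor / (RESOLUTION * DECAY)
 *
 * so a factor of 4096 means "trust the next timer event completely",
 * while a factor of 2048 halves the prediction.  If, for example, the CPU
 * keeps waking up after only half of the expected time, menu_update()
 * converges the stored factor towards 2048, and an expected 1000us idle
 * is then predicted as roughly 500us.
 */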

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu())
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu();

	return mult;
}
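
/*
 * Worked example for the multiplier above (purely illustrative numbers):
 * if get_loadavg() reports 10 and two tasks are blocked on IO for this
 * CPU, the multiplier becomes 1 + 2 * 10 + 10 * 2 = 41.  menu_select()
 * drops any C state whose exit_latency times this multiplier exceeds the
 * predicted idle time, so a state with a 100us exit latency is only
 * picked once we expect to stay idle for at least ~4.1ms.
 */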

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;

	data->last_state_idx = 0;
	data->exit_us = 0;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->expected_us =
	    DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/* find the deepest idle state that satisfies our constraints */
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req)
			break;
		if (s->exit_latency * multiplier > data->predicted_us)
			break;
		data->exit_us = s->exit_latency;
		data->last_state_idx = i;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->needs_update = 1;
}
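
/*
 * Sketch of the correction-factor update done in menu_update() below
 * (values follow from RESOLUTION = 1024 and DECAY = 4): the stored factor
 * is a decaying average in which the old value keeps a 3/4 weight and the
 * newest measured/expected ratio, scaled by RESOLUTION, provides the
 * remaining 1/4:
 *
 *	new_factor = old_factor * 3/4 + RESOLUTION * measured_us / expected_us
 *
 * For example, starting from the unity factor of 4096, one idle period
 * that ends after half of the expected time pulls the factor down to
 * 4096 * 3/4 + 1024 / 2 = 3584, i.e. later predictions shrink to ~87%.
 */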

/**
 * menu_update - attempts to guess what happened after entry
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);