/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in microseconds.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO                 (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
                        (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
                        (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE                       (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000)
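/*
 * Worked example (illustrative, assuming HZ=1000): a CPU reporting a
 * 10 us transition latency gets def_sampling_rate = 10 *
 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER = 10000 us (see
 * cpufreq_governor_dbs() below). jiffies_to_usecs(10) is then 10000 us,
 * so MIN_STAT_SAMPLING_RATE = 20000 us and def_sampling_rate is raised
 * to that floor; user space may then tune the rate between
 * MIN_SAMPLING_RATE (10000 us) and MAX_SAMPLING_RATE (10 s).
 */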
static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int enable:1,
                     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken,
 * then the cpu_hotplug lock should be taken before that. Note that the
 * cpu_hotplug lock is recursive for the same process.
 * -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .ignore_nice = 0,
        .powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
        cputime64_t retval;

        retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
                        kstat_cpu(cpu).cpustat.iowait);

        if (dbs_tuners_ins.ignore_nice)
                retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);

        return retval;
}

/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                                       relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                                       CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                                       CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}
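/*
 * Worked example (illustrative): with powersave_bias = 100 (the tunable
 * is in thousandths, so 10%) and freq_req = 2000000 kHz, freq_avg comes
 * out at 1800000 kHz. If the frequency table brackets that with
 * freq_lo = 1600000 and freq_hi = 2000000, then jiffies_hi =
 * (1800000 - 1600000) * jiffies_total / (2000000 - 1600000), i.e.
 * roughly half of jiffies_total (plus rounding): half of each sampling
 * interval is spent at each frequency, averaging out to the requested
 * 1.8 GHz.
 */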
static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
                dbs_info->freq_table = cpufreq_frequency_get_table(i);
                dbs_info->freq_lo = 0;
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                                   const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE
                     || input < MIN_SAMPLING_RATE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                dbs_info->prev_cpu_wall = get_jiffies_64();
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                                    const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/
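/*
 * Usage sketch (illustrative): the group above is created under each
 * policy's kobject, so the tunables can be adjusted from user space,
 * e.g.:
 *
 *   # echo 95 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *   # echo 1  > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice_load
 *
 * Out-of-range values are rejected with -EINVAL (sampling_rate,
 * up_threshold) or clamped (ignore_nice_load, powersave_bias).
 */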
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int idle_ticks, total_ticks;
        unsigned int load;
        cputime64_t cur_jiffies;

        struct cpufreq_policy *policy;
        unsigned int j;

        if (!this_dbs_info->enable)
                return;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;
        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
                        this_dbs_info->prev_cpu_wall);
        this_dbs_info->prev_cpu_wall = cur_jiffies;
        if (!total_ticks)
                return;
        /*
         * Every sampling_rate, we check if the current idle time is less
         * than 20% (default); if it is, we try to increase the frequency.
         * Every sampling_rate, we also look for the lowest frequency which
         * can sustain the load while keeping idle time over 30%. If such a
         * frequency exists, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of 5% (default)
         * of the current frequency.
         */

        /* Get Idle Time */
        idle_ticks = UINT_MAX;
        for_each_cpu_mask(j, policy->cpus) {
                cputime64_t total_idle_ticks;
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                total_idle_ticks = get_cpu_idle_time(j);
                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }
        load = (100 * (total_ticks - idle_ticks)) / total_ticks;

        /* Check for frequency increase */
        if (load > dbs_tuners_ins.up_threshold) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                                         CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we aim 10 percentage points under the up_threshold.
         */
        if (load < (dbs_tuners_ins.up_threshold - 10)) {
                unsigned int freq_next, freq_cur;

                freq_cur = __cpufreq_driver_getavg(policy);
                if (!freq_cur)
                        freq_cur = policy->cur;

                freq_next = (freq_cur * load) /
                                (dbs_tuners_ins.up_threshold - 10);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                                CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                                         CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                                CPUFREQ_RELATION_L);
                }
        }
}
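/*
 * Worked example (illustrative): with the default up_threshold of 80,
 * a sampled load of 85% jumps straight to policy->max. A load of 50%
 * at freq_cur = 2000000 kHz instead proposes freq_next =
 * 2000000 * 50 / 70 ~= 1428571 kHz, and CPUFREQ_RELATION_L then picks
 * the lowest table frequency at or above that, keeping the projected
 * load just under the 70% target.
 */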
static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                                        dbs_info->freq_lo,
                                        CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                              delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work(&dbs_info->work);
}
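/*
 * Worked example (illustrative): with HZ=1000 and a 20000 us sampling
 * rate, usecs_to_jiffies() yields delay = 20. If jiffies is currently
 * 1003, delay -= 1003 % 20 leaves 17, so the work fires at jiffy 1020;
 * every CPU doing the same arithmetic lands on a multiple of 20, which
 * keeps the per-CPU samples aligned on nearly the same jiffy.
 */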
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (policy->cpuinfo.transition_latency >
                                (TRANSITION_LATENCY_LIMIT * 1000)) {
                        printk(KERN_WARNING "ondemand governor failed to load "
                               "due to too long transition latency\n");
                        return -EINVAL;
                }
                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);
                dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                        j_dbs_info->prev_cpu_wall = get_jiffies_64();
                }
                this_dbs_info->cpu = cpu;
                /*
                 * Start the timer (schedule the sampling work) when this
                 * governor is used for the first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns; convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate = latency *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

                        if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->max,
                                                CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->min,
                                                CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
        .name = "ondemand",
        .governor = cpufreq_governor_dbs,
        .owner = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -ENOMEM; /* allocation failure, not a bad address */
        }
        return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_dbs);
        destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
                   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
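/*
 * Usage sketch (illustrative): once this module is loaded it registers
 * the "ondemand" governor, which user space can select per policy:
 *
 *   # echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * cpufreq then calls cpufreq_governor_dbs() with CPUFREQ_GOV_START, and
 * do_dbs_timer() begins sampling every sampling_rate microseconds.
 */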