/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor.  The default polling interval is 10000 times the
 * transition latency of the processor (10 * LATENCY_MULTIPLIER, see
 * cpufreq_governor_dbs() below).  The governor will work on any
 * processor with transition latency <= 10 ms, using an appropriate
 * sampling rate.  For CPUs with transition latency > 10 ms (mostly
 * drivers with CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon.
 * Define the minimal settable sampling rate to the greater of:
 * - "HW transition latency" * 1000 (same as default sampling rate / 10)
 * - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot shoot itself in the foot.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
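/*
 * Worked example (editor's illustration, not from the original source):
 * assume a CPU whose cpuinfo.transition_latency is 1000 ns, i.e.
 * latency = 1 us after the nS -> uS conversion in cpufreq_governor_dbs()
 * below.  Then
 *
 *	def_sampling_rate = max(10 * 1 * LATENCY_MULTIPLIER,
 *				MIN_STAT_SAMPLING_RATE)
 *			  = max(10000, 2 * jiffies_to_usecs(10))
 *
 * With HZ = 250 (a common configuration) jiffies_to_usecs(10) is 40000,
 * so the governor samples every 80000 us = 80 ms: for fast-switching
 * CPUs the statistics floor, not the hardware latency, dominates.
 */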
static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	struct cpufreq_policy *cur_policy;
	unsigned int prev_cpu_idle_up;
	unsigned int prev_cpu_idle_down;
	unsigned int enable;
	unsigned int down_skip;
	unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken,
 * then cpu_hotplug lock should be taken before that. Note that cpu_hotplug
 * lock is recursive for the same process.
 * -Venki
 */
static DEFINE_MUTEX(dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	unsigned int add_nice = 0, ret;

	if (dbs_tuners_ins.ignore_nice)
		add_nice = kstat_cpu(cpu).cpustat.nice;

	ret = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		add_nice;

	return ret;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do
	 * not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
			|| this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};
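/*
 * Editor's note on the accounting above: the kstat cpustat fields are
 * cumulative jiffy counts, so get_cpu_idle_time() returns a running
 * total and every caller works on deltas between two reads.  With
 * ignore_nice set, time spent running nice'd tasks is folded into the
 * idle total, so, for example, a lone nice +19 batch job will not by
 * itself keep the frequency raised.
 */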
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
	    input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might
	 * actually want this; they would be crazy though :)
	 */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};
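/*
 * Example use of the tunables above (editor's sketch; the path assumes
 * the attribute group is attached to a per-policy kobject, as
 * cpufreq_governor_dbs() below does, here shown for cpu0):
 *
 *	# cat /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 *	80
 *	# echo 30 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 *
 * Writes that would leave down_threshold >= up_threshold are rejected
 * with -EINVAL, so the two must be adjusted in a compatible order.
 */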
/************************** sysfs end ************************/

static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int tmp_idle_ticks, total_idle_ticks;
	unsigned int freq_target;
	unsigned int freq_down_sampling_rate;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;

	/*
	 * The default safe range is 20% to 80%.
	 * Every sampling_rate we check
	 * - if current idle time is less than 20%, then we try to
	 *   increase the frequency.
	 * Every sampling_rate * sampling_down_factor we check
	 * - if current idle time is more than 80%, then we try to
	 *   decrease the frequency.
	 *
	 * Both increases and decreases happen in steps of freq_step
	 * percent (default 5%) of the maximum frequency.
	 */

	/* Check for frequency increase */
	idle_ticks = UINT_MAX;

	total_idle_ticks = get_cpu_idle_time(cpu);
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		this_dbs_info->down_skip = 0;
		this_dbs_info->prev_cpu_idle_down =
			this_dbs_info->prev_cpu_idle_up;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease */
	this_dbs_info->down_skip++;
	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
		return;

	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	this_dbs_info->down_skip = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);

	if (idle_ticks > down_idle_ticks) {
		/*
		 * if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_target to be zero
		 */
		if (this_dbs_info->requested_freq == policy->min
				|| dbs_tuners_ins.freq_step == 0)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}
}
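/*
 * Worked example for the thresholds above (editor's illustration):
 * with sampling_rate = 80000 us, HZ = 250 and up_threshold = 80, one
 * sampling window is usecs_to_jiffies(80000) = 20 jiffies, so
 *
 *	up_idle_ticks = (100 - 80) * 20 = 400
 *
 * A CPU that was idle for fewer than 4 of those 20 jiffies
 * (idle_ticks * 100 < 400) is considered busy enough to raise the
 * frequency by one freq_step.
 */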
static void do_dbs_timer(struct work_struct *work)
{
	int i;
	mutex_lock(&dbs_mutex);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
	init_timer_deferrable(&dbs_work.timer);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Start the delayed work when this governor is used
		 * for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(10 * latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			dbs_timer_init();
			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the delayed work when no CPU is using this
		 * governor any more.
		 */
		if (dbs_enable == 0) {
			dbs_timer_exit();
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}
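/*
 * Selecting this governor from userspace (editor's note; this is the
 * standard cpufreq interface rather than anything specific to this
 * file):
 *
 *	# echo conservative > \
 *		/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * which ends up calling cpufreq_governor_dbs() above with
 * CPUFREQ_GOV_START for that policy.
 */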
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}


MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);