/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as shorthand for demand-based switching.
 * It helps to keep variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling interval of this governor depends on the capability of
 * the processor.  The default polling interval is 10000 times the
 * transition latency of the processor.  The governor will work on any
 * processor with transition latency <= 10 ms, using an appropriate
 * sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000) /* 10 ms, in ns */

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;
	unsigned int		prev_cpu_idle_down;
	unsigned int		enable;
	unsigned int		down_skip;
	unsigned int		requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex.  The cpu_hotplug lock should always be held before
 * dbs_mutex.  If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that.  Note that the
 * cpu_hotplug lock is recursive for the same process. -Venki
 */
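/*
 * A minimal sketch of the safe nesting described above (illustrative only,
 * not a helper used by this driver; get_online_cpus()/put_online_cpus() is
 * assumed as the cpu_hotplug locking API):
 *
 *	get_online_cpus();
 *	mutex_lock(&dbs_mutex);
 *	...			(may call __cpufreq_driver_target() here)
 *	mutex_unlock(&dbs_mutex);
 *	put_online_cpus();
 *
 * Taking dbs_mutex first and then calling into an API that acquires the
 * cpu_hotplug lock inverts this order and can deadlock against a
 * concurrent CPU hotplug operation.
 */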
static DEFINE_MUTEX(dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	unsigned int add_nice = 0, ret;

	/* when nice load is ignored, time spent niced counts as idle */
	if (dbs_tuners_ins.ignore_nice)
		add_nice = kstat_cpu(cpu).cpustat.nice;

	ret = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		add_nice;

	return ret;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	if (!this_dbs_info->enable)
		return 0;

	this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
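/*
 * For reference, the show_one() macro above expands mechanically; e.g.
 * show_one(up_threshold, up_threshold) becomes:
 *
 *	static ssize_t show_up_threshold
 *	(struct cpufreq_policy *unused, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", dbs_tuners_ins.up_threshold);
 *	}
 */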
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
					  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE ||
	    input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
	    input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might
	 * actually want this, they would be crazy though :)
	 */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/
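/*
 * Worked example of the threshold arithmetic used below (illustrative
 * numbers, assuming HZ=250 and the default tuners): a sampling_rate of
 * 100000 us spans usecs_to_jiffies(100000) = 25 ticks.  With
 * up_threshold = 80,
 *
 *	up_idle_ticks = (100 - 80) * 25 = 500
 *
 * and since idle_ticks is scaled by 100 before the comparison, the CPU
 * must have been idle for fewer than 5 of the 25 ticks (20%) for a
 * frequency increase to be requested.
 */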
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int tmp_idle_ticks, total_idle_ticks;
	unsigned int freq_target;
	unsigned int freq_down_sampling_rate;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;

	/*
	 * The default safe range is 20% to 80%.
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase the frequency
	 * Every sampling_rate*sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease the frequency
	 *
	 * Both increases and decreases happen in steps of freq_step
	 * (default 5%) of the maximum frequency.
	 */

	/* Check for frequency increase */
	idle_ticks = UINT_MAX;

	/* how much idle time has accumulated since the last up-check? */
	total_idle_ticks = get_cpu_idle_time(cpu);
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		this_dbs_info->down_skip = 0;
		this_dbs_info->prev_cpu_idle_down =
			this_dbs_info->prev_cpu_idle_up;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100 kHz. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease, only every sampling_down_factor samples */
	this_dbs_info->down_skip++;
	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
		return;

	/* how much idle time has accumulated since the last down-check? */
	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	this_dbs_info->down_skip = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);

	if (idle_ticks > down_idle_ticks) {
		/*
		 * if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_target to be zero
		 */
		if (this_dbs_info->requested_freq == policy->min
				|| dbs_tuners_ins.freq_step == 0)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100 kHz. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}
}
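/*
 * Timing sketch for the shared sampling loop below (illustrative, default
 * tuners): with sampling_rate = 100000 us and sampling_down_factor = 1,
 * every online CPU is checked for an increase every 100 ms and is eligible
 * for a decrease on every sample.  With sampling_down_factor = 5, a
 * decrease is only considered every fifth sample (500 ms); down_skip
 * counts the samples skipped in between.
 */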
static void do_dbs_timer(struct work_struct *work)
{
	int i;
	mutex_lock(&dbs_mutex);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
	init_timer_deferrable(&dbs_work.timer);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}
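/*
 * Worked example for the sampling-rate setup in CPUFREQ_GOV_START below
 * (illustrative numbers): a driver reporting a transition latency of
 * 10000 ns gives latency = 10 us, so
 *
 *	def_sampling_rate = 10 * 10 * 1000 = 100000 us
 *
 * i.e. the governor samples every 100 ms, subject to the
 * MIN_STAT_SAMPLING_RATE floor applied below.
 */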
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			/* seed each CPU's baseline from its own idle counters */
			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Start the timer/schedule the work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns; convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = 10 * latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			dbs_timer_init();
			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer/scheduled work when this governor
		 * is no longer used on any CPU
		 */
		if (dbs_enable == 0) {
			dbs_timer_exit();
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}


MODULE_AUTHOR("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
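/*
 * Usage sketch from userspace (paths follow the standard cpufreq sysfs
 * layout; the exact CPU and mount point are assumptions):
 *
 *	# echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *	# echo 30 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/down_threshold
 *	# echo 10 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 *
 * The tunables appear under the per-policy "conservative" directory
 * created from dbs_attr_group above.
 */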