/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

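/*
 * A worked example of the limits above (illustrative only, assuming a
 * hypothetical CPU with a 1 us transition latency and HZ=250, i.e. one
 * tick is 4000 us):
 *
 *   def_sampling_rate      = 10 * 1 * 1000         =  10,000 us (10 ms)
 *   MIN_STAT_SAMPLING_RATE = 2 * jiffies_to_usecs(10)
 *                          = 2 * 40,000            =  80,000 us
 *   def_sampling_rate is below that floor, so cpufreq_governor_dbs()
 *   raises it to 80,000 us at GOV_START; from that value:
 *   MIN_SAMPLING_RATE      = 80,000 / 2            =  40,000 us
 *   MAX_SAMPLING_RATE      = 500 * 80,000          =  40,000,000 us (40 s)
 */
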
static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	struct cpufreq_policy *cur_policy;
	unsigned int prev_cpu_idle_up;
	unsigned int prev_cpu_idle_down;
	unsigned int enable;
	unsigned int down_skip;
	unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken,
 * then the cpu_hotplug lock should be taken before that. Note that the
 * cpu_hotplug lock is recursive for the same process.
 *							-Venki
 */
static DEFINE_MUTEX(dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	unsigned int add_nice = 0, ret;

	if (dbs_tuners_ins.ignore_nice)
		add_nice = kstat_cpu(cpu).cpustat.nice;

	ret = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		add_nice;

	return ret;
}

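/*
 * Illustration of ignore_nice (hypothetical tick counts): if a CPU has
 * accumulated idle=1000, iowait=200 and nice=300 ticks, then
 * get_cpu_idle_time() returns 1200 with ignore_nice=0 but 1500 with
 * ignore_nice=1, i.e. time spent in niced tasks is treated as idle time
 * and will not, on its own, cause the governor to raise the frequency.
 */
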
/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	if (!this_dbs_info->enable)
		return 0;

	this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)				\
static ssize_t show_##file_name					\
(struct cpufreq_policy *unused, char *buf)			\
{								\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE ||
			input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
			input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/

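/*
 * The tunables above appear under
 * /sys/devices/system/cpu/cpuN/cpufreq/conservative/. A typical
 * (illustrative) tuning session from a root shell might look like:
 *
 *   cat /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 *   echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 *   echo 10 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 *
 * Writes that fail the checks in the store_* handlers above (for example
 * an up_threshold at or below the current down_threshold) return -EINVAL
 * to the writer.
 */
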
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int tmp_idle_ticks, total_idle_ticks;
	unsigned int freq_target;
	unsigned int freq_down_sampling_rate;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;

	/*
	 * The default safe range is 20% to 80%.
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase the frequency
	 * Every sampling_rate * sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease the frequency
	 *
	 * Both frequency increases and decreases happen in steps of
	 * freq_step percent (5% by default) of the maximum frequency.
	 */

	/* Check for frequency increase */
	idle_ticks = UINT_MAX;

	/* idle time accumulated since the last up-check */
	total_idle_ticks = get_cpu_idle_time(cpu);
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		this_dbs_info->down_skip = 0;
		this_dbs_info->prev_cpu_idle_down =
			this_dbs_info->prev_cpu_idle_up;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease */
	this_dbs_info->down_skip++;
	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
		return;

	/* idle time accumulated since the last down-check */
	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	this_dbs_info->down_skip = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);

	if (idle_ticks > down_idle_ticks) {
		/*
		 * if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_target to be zero
		 */
		if (this_dbs_info->requested_freq == policy->min
				|| dbs_tuners_ins.freq_step == 0)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}
}

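/*
 * A worked example of the up-check above (illustrative numbers only,
 * assuming HZ=250, sampling_rate=80,000 us and up_threshold=80):
 *
 *   usecs_to_jiffies(80,000)        = 20 jiffies per sample window
 *   up_idle_ticks = (100 - 80) * 20 = 400
 *
 * If the CPU was idle for only 3 ticks during the window, then
 * idle_ticks = 3 * 100 = 300 < 400; the CPU was busier than the 80%
 * up_threshold allows, so requested_freq is bumped by freq_step percent
 * of policy->max (e.g. 5% of a 2,000,000 kHz max is a 100,000 kHz step).
 */
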
static void do_dbs_timer(struct work_struct *work)
{
	int i;
	mutex_lock(&dbs_mutex);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
	init_timer_deferrable(&dbs_work.timer);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			/* seed each CPU's counters from its own idle time */
			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Start the timer-scheduled work when this governor is
		 * used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = 10 * latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			dbs_timer_init();
			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer-scheduled work when this governor is
		 * no longer used on any CPU
		 */
		if (dbs_enable == 0) {
			dbs_timer_exit();
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

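/*
 * Example of the GOV_LIMITS clamping above (hypothetical values): if
 * userspace lowers policy->max to 800 MHz while the governor last
 * requested 1.2 GHz, the next GOV_LIMITS event retargets the CPU to
 * 800 MHz with CPUFREQ_RELATION_H; the transition notifier then updates
 * requested_freq, so later up/down steps start from the clamped value.
 */
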
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);

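/*
 * Typical usage (illustrative): once this module is loaded (or built
 * in), the governor can be selected per CPU from a root shell:
 *
 *   modprobe cpufreq_conservative
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * With CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE set, the fs_initcall
 * above registers the governor early enough for it to serve as the
 * boot-time default.
 */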