/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as shorthand for demand-based switching.
 * It helps to keep variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)

/*
 * The polling interval of this governor depends on the capability of
 * the processor. The default sampling rate is
 * 10 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (i.e. 10000) times the
 * transition latency of the processor (see the CPUFREQ_GOV_START
 * handler below). The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO                 (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
                        (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
                        (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE                       (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (10)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000)   /* 10 ms, in us */
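
/*
 * Worked example with illustrative numbers (HZ=250, and a driver that
 * reports a 10000 ns transition latency): jiffies_to_usecs(10) is
 * 40000 us, so MIN_STAT_SAMPLING_RATE = 2 * 40000 = 80000 us. The
 * CPUFREQ_GOV_START handler below computes latency = 10000 / 1000 =
 * 10 us and def_sampling_rate = 10 * 10 * 1000 = 100000 us, giving a
 * writable sampling_rate range of 50000 us to 50000000 us.
 */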

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
        struct cpufreq_policy *cur_policy;
        unsigned int prev_cpu_idle_up;
        unsigned int prev_cpu_idle_down;
        unsigned int enable;
        unsigned int down_skip;
        unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the
 * cpu_hotplug lock and dbs_mutex. The cpu_hotplug lock should always
 * be held before dbs_mutex. If any function that can potentially take
 * the cpu_hotplug lock (like __cpufreq_driver_target()) is called with
 * dbs_mutex taken, then the cpu_hotplug lock must be taken before that.
 * Note that the cpu_hotplug lock is recursive for the same process.
 * -Venki
 */
static DEFINE_MUTEX(dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int down_threshold;
        unsigned int ignore_nice;
        unsigned int freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,
        .freq_step = 5,
};

static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
        unsigned int add_nice = 0, ret;

        if (dbs_tuners_ins.ignore_nice)
                add_nice = kstat_cpu(cpu).cpustat.nice;

        ret = kstat_cpu(cpu).cpustat.idle +
                kstat_cpu(cpu).cpustat.iowait +
                add_nice;

        return ret;
}
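
/*
 * Note that the cpustat counters read above are cumulative tick counts
 * since boot; the governor samples them periodically and works with
 * the deltas. As an illustrative example, two reads taken one sampling
 * interval apart that differ by 20 mean the CPU accumulated 20 idle
 * ticks over that interval. With ignore_nice set, time spent in niced
 * tasks is counted as idle time as well.
 */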

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative governor tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *unused, char *buf) \
{ \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE ||
                        input < MIN_SAMPLING_RATE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 ||
                        input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 ||
                        input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.down_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        unsigned int j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *j_dbs_info;
                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
                j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero as the user might
         * actually want this, they would be crazy though :)
         */
        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.freq_step = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice_load.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(int cpu)
{
        unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
        unsigned int tmp_idle_ticks, total_idle_ticks;
        unsigned int freq_step;
        unsigned int freq_down_sampling_rate;
        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
        struct cpufreq_policy *policy;

        if (!this_dbs_info->enable)
                return;

        policy = this_dbs_info->cur_policy;

        /*
         * The default safe range is 20% to 80%.
         * Every sampling_rate, we check
         *      - If current idle time is less than 20%, then we try to
         *        increase frequency.
         * Every sampling_rate*sampling_down_factor, we check
         *      - If current idle time is more than 80%, then we try to
         *        decrease frequency.
         *
         * Unlike ondemand, both increases and decreases happen in steps
         * of freq_step (by default 5% of max_frequency) rather than
         * jumping straight to the maximum frequency.
         */
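
        /*
         * A worked example of the scaled comparison below, with
         * illustrative numbers (HZ=250, sampling_rate=100000 us,
         * up_threshold=80): the sampling interval is
         * usecs_to_jiffies(100000) = 25 ticks, so
         * up_idle_ticks = (100 - 80) * 25 = 500. A CPU that was idle
         * for 4 of those 25 ticks gives idle_ticks * 100 = 400 < 500,
         * i.e. idle time below 20%, and the frequency is stepped up.
         */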

        /* Check for frequency increase */
        idle_ticks = UINT_MAX;

        total_idle_ticks = get_cpu_idle_time(cpu);
        tmp_idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_up;
        this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
                idle_ticks = tmp_idle_ticks;

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        if (idle_ticks < up_idle_ticks) {
                this_dbs_info->down_skip = 0;
                this_dbs_info->prev_cpu_idle_down =
                        this_dbs_info->prev_cpu_idle_up;

                /* if we are already at full speed then break out early */
                if (this_dbs_info->requested_freq == policy->max)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                this_dbs_info->requested_freq += freq_step;
                if (this_dbs_info->requested_freq > policy->max)
                        this_dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /* Check for frequency decrease */
        this_dbs_info->down_skip++;
        if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
                return;

        total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
        tmp_idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_down;
        this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
                idle_ticks = tmp_idle_ticks;

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        this_dbs_info->down_skip = 0;

        freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
                dbs_tuners_ins.sampling_down_factor;
        down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
                usecs_to_jiffies(freq_down_sampling_rate);

        if (idle_ticks > down_idle_ticks) {
                /*
                 * if we are already at the lowest speed then break out early
                 * or if we 'cannot' reduce the speed as the user might want
                 * freq_step to be zero
                 */
                if (this_dbs_info->requested_freq == policy->min
                                || dbs_tuners_ins.freq_step == 0)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                this_dbs_info->requested_freq -= freq_step;
                if (this_dbs_info->requested_freq < policy->min)
                        this_dbs_info->requested_freq = policy->min;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }
}
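
/*
 * All CPUs are polled from a single self-rearming delayed work item:
 * each run takes dbs_mutex, evaluates every online CPU, and re-queues
 * itself sampling_rate microseconds into the future.
 */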
static void do_dbs_timer(struct work_struct *work)
{
        int i;
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}

static inline void dbs_timer_exit(void)
{
        cancel_delayed_work(&dbs_work);
}
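
/*
 * Note that cancel_delayed_work() only cancels a pending, not-yet-running
 * work item; it does not wait for a do_dbs_timer() invocation that is
 * already executing (and which would re-arm itself). This is why module
 * unload additionally calls flush_scheduled_work() in
 * cpufreq_gov_dbs_exit() below.
 */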

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                /* policy->cpuinfo.transition_latency is in ns */
                if (policy->cpuinfo.transition_latency >
                                (TRANSITION_LATENCY_LIMIT * 1000))
                        return -EINVAL;
                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        /* each CPU records its own idle-time baseline */
                        j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
                        j_dbs_info->prev_cpu_idle_down
                                = j_dbs_info->prev_cpu_idle_up;
                }
                this_dbs_info->enable = 1;
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;

                dbs_enable++;
                /*
                 * Start the sampling work when this governor is used
                 * for the first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate = 10 * latency *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

                        if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;

                        dbs_timer_init();
                }

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                /*
                 * Stop the sampling work when this governor is no
                 * longer used on any CPU.
                 */
                if (dbs_enable == 0)
                        dbs_timer_exit();

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
        .name           = "conservative",
        .governor       = cpufreq_governor_dbs,
        .owner          = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        /* Make sure that the scheduled work is indeed not running */
        flush_scheduled_work();

        cpufreq_unregister_governor(&cpufreq_gov_dbs);
}

MODULE_AUTHOR("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
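
/*
 * Usage sketch (illustrative; paths assume cpu0 is managed by cpufreq
 * with this governor available):
 *
 *   # echo conservative > \
 *              /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   # cd /sys/devices/system/cpu/cpu0/cpufreq/conservative
 *   # echo 10 > freq_step          # step 10% of max freq per sample
 *   # echo 90 > up_threshold       # ramp up when idle falls below 10%
 *   # echo 1 > ignore_nice_load    # treat niced load as idle
 *
 * The attribute_group above names the directory "conservative"; its
 * files correspond one-to-one to the define_one_ro()/define_one_rw()
 * attributes.
 */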