/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling interval is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/*
 * MIN_SAMPLING_RATE above will vanish along with its sysfs file soon.
 * Define the minimal settable sampling rate as the greater of:
 *   - "HW transition latency" * 100 (same as default sampling rate / 10)
 *   - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot shoot itself in the foot.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

/* This will also vanish soon with the removal of sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
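/*
 * Worked example added for clarity (illustrative numbers only, assuming
 * HZ=250; not taken from any specific platform): jiffies_to_usecs(10) is
 * then 40000 us, so MIN_STAT_SAMPLING_RATE = 2 * 40000 = 80000 us.  A
 * driver reporting a 10000 ns transition latency gives latency = 10 us in
 * cpufreq_governor_dbs() below, and def_sampling_rate =
 * max(10 * LATENCY_MULTIPLIER, MIN_STAT_SAMPLING_RATE) = 80000 us, i.e.
 * the statistics floor dominates for fast-switching CPUs.
 */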
static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	unsigned int down_skip;
	unsigned int requested_freq;
	int cpu;
	unsigned int enable:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 * DEADLOCK ALERT! (2): do_dbs_timer() must not take the dbs_mutex, because it
 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
 * raceless workqueue teardown.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kconservative_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
				  kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
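/*
 * Clarifying note (added; describes the tick-sched behaviour this code
 * relies on): get_cpu_idle_time_us() is expected to return -1ULL when
 * tickless idle accounting is unavailable, in which case the jiffy-based
 * estimate above is used instead.  dbs_check_cpu() later turns deltas of
 * these values into a load figure, e.g. a 100000 us wall delta with a
 * 15000 us idle delta gives 100 * (100000 - 15000) / 100000 = 85% load.
 */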
/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do
	 * not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
	    || this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
					  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
	    input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	/*
	 * must be at least 11, otherwise the frequency will never fall
	 * (dbs_check_cpu() only scales down below down_threshold - 10)
	 */
	if (ret != 1 || input < 11 || input > 100 ||
	    input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
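/*
 * Illustrative note (added): the checks above keep down_threshold strictly
 * below up_threshold.  With the defaults (down_threshold = 20,
 * up_threshold = 80), writing 85 to down_threshold or 15 to up_threshold
 * returns -EINVAL, and down_threshold values below 11 are refused for the
 * reason given in the comment above.
 */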
static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/
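/*
 * Usage sketch (illustrative; paths assume the standard cpufreq sysfs
 * layout): once this governor is active on a policy, the tunables above
 * appear in that policy's "conservative" group, e.g.
 *
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   echo 10 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 *   echo 95 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 */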
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int load = 0;
	unsigned int freq_target;

	struct cpufreq_policy *policy;
	unsigned int j;

	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we compute the load, i.e. the busy fraction
	 * of the elapsed wall time.  If the load rises above up_threshold
	 * (80% by default) we try to increase the frequency; if it falls
	 * below (down_threshold - 10) we try to decrease it.
	 *
	 * Both increases and decreases happen in steps of freq_step percent
	 * (5% by default) of the maximum frequency.
	 */

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;
	}

	/*
	 * break out early if we 'cannot' change the speed: the user may
	 * deliberately have set freq_step to zero
	 */
	if (dbs_tuners_ins.freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		this_dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay 10 percentage points below the threshold.
	 */
	if (load < (dbs_tuners_ins.down_threshold - 10)) {
		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}
}
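/*
 * Worked example (illustrative numbers, not from any particular platform):
 * with policy->max = 2000000 kHz and freq_step = 5, each step is
 * (5 * 2000000) / 100 = 100000 kHz.  A sample with 85% load (above the
 * default up_threshold of 80) raises requested_freq by 100000 kHz; a
 * sample with 8% load (below down_threshold - 10 = 10) lowers it by the
 * same amount, clamped to the [policy->min, policy->max] range.
 */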
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;

	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	dbs_check_cpu(dbs_info);

	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
			      delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Set the default sampling rate and register the transition
		 * notifier when this governor is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns; convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;

		/*
		 * Unregister the transition notifier when the last CPU
		 * using this governor is stopped.
		 */
		if (dbs_enable == 0)
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);

		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	int err;

	kconservative_wq = create_workqueue("kconservative");
	if (!kconservative_wq) {
		printk(KERN_ERR "Creation of kconservative failed\n");
		return -EFAULT;
	}

	err = cpufreq_register_governor(&cpufreq_gov_conservative);
	if (err)
		destroy_workqueue(kconservative_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
	destroy_workqueue(kconservative_wq);
}


MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
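/*
 * Usage note (added; assumes the usual kernel build naming): when built as
 * a module this governor is loaded with "modprobe cpufreq_conservative" and
 * selected per policy by writing "conservative" to that policy's
 * scaling_governor file; see the sysfs sketch near the tunables above.
 */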