/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon.
 * Define the minimal settable sampling rate to the greater of:
 *   - "HW transition latency" * 100 (same as default sampling / 10)
 *   - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot shoot itself in the foot.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	unsigned int down_skip;
	unsigned int requested_freq;
	int cpu;
	unsigned int enable:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process.			-Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kconservative_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside
	 * the 'valid' range of frequencies available to us; otherwise
	 * we do not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
			|| this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
	    input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
	    input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int load = 0;
	unsigned int freq_target;

	struct cpufreq_policy *policy;
	unsigned int j;

	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate * sampling_down_factor we check whether the
	 * current idle time is more than 80%; if it is, we try to decrease
	 * the frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of maximum frequency.
	 */

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;
	}

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (dbs_tuners_ins.freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		this_dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay 10 points under the threshold.
	 */
	if (load < (dbs_tuners_ins.down_threshold - 10)) {
		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}
}

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	dbs_check_cpu(dbs_info);

	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
				delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Start the timer schedule work when this governor is
		 * used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;

		/*
		 * Stop the timer schedule work and drop the transition
		 * notifier when this governor is no longer used by any CPU
		 */
		if (dbs_enable == 0)
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);

		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	int err;

	kconservative_wq = create_workqueue("kconservative");
	if (!kconservative_wq) {
		printk(KERN_ERR "Creation of kconservative failed\n");
		return -EFAULT;
	}

	err = cpufreq_register_governor(&cpufreq_gov_conservative);
	if (err)
		destroy_workqueue(kconservative_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
	destroy_workqueue(kconservative_wq);
}


MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
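
/*
 * Usage sketch (illustrative only, not compiled into the driver):
 * the tunables defined above (sampling_rate, sampling_down_factor,
 * up_threshold, down_threshold, ignore_nice_load, freq_step) are exported
 * through the "conservative" attribute group that cpufreq_governor_dbs()
 * attaches to the policy kobject. Assuming sysfs is mounted at /sys and
 * cpu0 is managed by cpufreq, the governor would typically be selected
 * and tuned along these lines (paths may vary by kernel configuration):
 *
 *   # echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   # cat /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 *   # echo 10 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 *
 * Writes are validated by the store_*() handlers above, e.g. down_threshold
 * must stay in [11, 100) and below up_threshold, and sampling_rate is
 * clamped to minimum_sampling_rate().
 */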