/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	unsigned int iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-cpu data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-cpu
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
		return false;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}
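/*
 * sugov_update_commit - Apply @next_freq unless it matches the last request.
 *
 * On fast-switch capable platforms the new frequency is programmed directly
 * from scheduler context; otherwise the request is handed off to the governor
 * kthread via irq_work and completed in sugov_work().
 */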
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (!next_freq)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
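/*
 * Worked example (illustrative numbers, not from any particular platform):
 * on a frequency-invariant system with cpuinfo.max_freq = 2000000 kHz and
 * util/max = 0.5, the raw value computed above is
 *
 *	freq = (2000000 + 2000000 / 4) * 0.5 = 1250000 kHz
 *
 * which cpufreq_driver_resolve_freq() then maps to the lowest frequency in
 * the driver's table that is not below it.
 */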
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long cfs_max;

	cfs_max = arch_scale_cpu_capacity(NULL, cpu);

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;
}

static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		if (sg_cpu->iowait_boost_pending)
			return;

		sg_cpu->iowait_boost_pending = true;

		if (sg_cpu->iowait_boost) {
			sg_cpu->iowait_boost <<= 1;
			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
		} else {
			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
		}
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC) {
			sg_cpu->iowait_boost = 0;
			sg_cpu->iowait_boost_pending = false;
		}
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned int boost_util, boost_max;

	if (!sg_cpu->iowait_boost)
		return;

	if (sg_cpu->iowait_boost_pending) {
		sg_cpu->iowait_boost_pending = false;
	} else {
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	boost_util = sg_cpu->iowait_boost;
	boost_max = sg_cpu->iowait_boost_max;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, sg_cpu->cpu);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq) {
			next_f = sg_policy->next_freq;

			/* Reset cached freq as next_freq has changed */
			sg_policy->cached_raw_freq = 0;
		}
	}
	sugov_update_commit(sg_policy, time, next_f);
}
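/*
 * sugov_next_freq_shared - Pick the next frequency for a shared policy.
 *
 * Evaluate all CPUs in the policy, skipping those that look idle (no
 * utilization update for more than a tick). If any remaining CPU has recently
 * run RT or deadline tasks, return the maximum frequency right away;
 * otherwise use the utilization of the CPU with the highest util/max ratio,
 * including any iowait boost in effect.
 */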
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			j_sg_cpu->iowait_boost_pending = false;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max, sg_cpu->cpu);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/
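/*
 * rate_limit_us: minimum time, in microseconds, that must pass between two
 * consecutive frequency updates of a policy. It is exposed through sysfs and
 * translated into freq_update_delay_ns, which sugov_should_update_freq()
 * checks against the time of the last update.
 */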
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;

	/* Kthread is bound to all CPUs by default */
	if (!policy->dvfs_possible_from_any_cpu)
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
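/*
 * Tunables are allocated per policy unless the platform does not have
 * per-policy governor directories, in which case a single global instance
 * is shared by all policies (see have_governor_per_policy()).
 */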
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);

free_sg_policy:
	mutex_unlock(&global_tunables_lock);

	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
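/*
 * sugov_start - Begin running the governor for @policy.
 *
 * Reset the per-policy state, (re)initialize the per-CPU data for every CPU
 * in the policy, and register the scheduler's utilization update hooks, using
 * the shared or single-CPU callback depending on the policy type.
 */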
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = SCHED_CPUFREQ_RT;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
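/*
 * Usage note: once registered, the governor can be selected per policy at
 * run time through sysfs, e.g.:
 *
 *	echo schedutil > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor
 *
 * The rate_limit_us tunable then appears under that policy's (or, without
 * per-policy governor directories, the global) "schedutil" directory.
 */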