1 /* 2 * CPUFreq governor based on scheduler-provided CPU utilization data. 3 * 4 * Copyright (C) 2016, Intel Corporation 5 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 14 #include <linux/cpufreq.h> 15 #include <linux/kthread.h> 16 #include <uapi/linux/sched/types.h> 17 #include <linux/slab.h> 18 #include <trace/events/power.h> 19 20 #include "sched.h" 21 22 #define SUGOV_KTHREAD_PRIORITY 50 23 24 struct sugov_tunables { 25 struct gov_attr_set attr_set; 26 unsigned int rate_limit_us; 27 }; 28 29 struct sugov_policy { 30 struct cpufreq_policy *policy; 31 32 struct sugov_tunables *tunables; 33 struct list_head tunables_hook; 34 35 raw_spinlock_t update_lock; /* For shared policies */ 36 u64 last_freq_update_time; 37 s64 freq_update_delay_ns; 38 unsigned int next_freq; 39 unsigned int cached_raw_freq; 40 41 /* The next fields are only needed if fast switch cannot be used. */ 42 struct irq_work irq_work; 43 struct kthread_work work; 44 struct mutex work_lock; 45 struct kthread_worker worker; 46 struct task_struct *thread; 47 bool work_in_progress; 48 49 bool need_freq_update; 50 }; 51 52 struct sugov_cpu { 53 struct update_util_data update_util; 54 struct sugov_policy *sg_policy; 55 unsigned int cpu; 56 57 bool iowait_boost_pending; 58 unsigned int iowait_boost; 59 unsigned int iowait_boost_max; 60 u64 last_update; 61 62 /* The fields below are only needed when sharing a policy. */ 63 unsigned long util_cfs; 64 unsigned long util_dl; 65 unsigned long max; 66 unsigned int flags; 67 68 /* The field below is for single-CPU policies only. */ 69 #ifdef CONFIG_NO_HZ_COMMON 70 unsigned long saved_idle_calls; 71 #endif 72 }; 73 74 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu); 75 76 /************************ Governor internals ***********************/ 77 78 static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) 79 { 80 s64 delta_ns; 81 82 /* 83 * Since cpufreq_update_util() is called with rq->lock held for 84 * the @target_cpu, our per-cpu data is fully serialized. 85 * 86 * However, drivers cannot in general deal with cross-cpu 87 * requests, so while get_next_freq() will work, our 88 * sugov_update_commit() call may not for the fast switching platforms. 89 * 90 * Hence stop here for remote requests if they aren't supported 91 * by the hardware, as calculating the frequency is pointless if 92 * we cannot in fact act on it. 93 * 94 * For the slow switching platforms, the kthread is always scheduled on 95 * the right set of CPUs and any CPU can find the next frequency and 96 * schedule the kthread. 97 */ 98 if (sg_policy->policy->fast_switch_enabled && 99 !cpufreq_can_do_remote_dvfs(sg_policy->policy)) 100 return false; 101 102 if (sg_policy->work_in_progress) 103 return false; 104 105 if (unlikely(sg_policy->need_freq_update)) { 106 sg_policy->need_freq_update = false; 107 /* 108 * This happens when limits change, so forget the previous 109 * next_freq value and force an update. 110 */ 111 sg_policy->next_freq = UINT_MAX; 112 return true; 113 } 114 115 delta_ns = time - sg_policy->last_freq_update_time; 116 return delta_ns >= sg_policy->freq_update_delay_ns; 117 } 118 119 static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, 120 unsigned int next_freq) 121 { 122 struct cpufreq_policy *policy = sg_policy->policy; 123 124 if (sg_policy->next_freq == next_freq) 125 return; 126 127 sg_policy->next_freq = next_freq; 128 sg_policy->last_freq_update_time = time; 129 130 if (policy->fast_switch_enabled) { 131 next_freq = cpufreq_driver_fast_switch(policy, next_freq); 132 if (!next_freq) 133 return; 134 135 policy->cur = next_freq; 136 trace_cpu_frequency(next_freq, smp_processor_id()); 137 } else { 138 sg_policy->work_in_progress = true; 139 irq_work_queue(&sg_policy->irq_work); 140 } 141 } 142 143 /** 144 * get_next_freq - Compute a new frequency for a given cpufreq policy. 145 * @sg_policy: schedutil policy object to compute the new frequency for. 146 * @util: Current CPU utilization. 147 * @max: CPU capacity. 148 * 149 * If the utilization is frequency-invariant, choose the new frequency to be 150 * proportional to it, that is 151 * 152 * next_freq = C * max_freq * util / max 153 * 154 * Otherwise, approximate the would-be frequency-invariant utilization by 155 * util_raw * (curr_freq / max_freq) which leads to 156 * 157 * next_freq = C * curr_freq * util_raw / max 158 * 159 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8. 160 * 161 * The lowest driver-supported frequency which is equal or greater than the raw 162 * next_freq (as calculated above) is returned, subject to policy min/max and 163 * cpufreq driver limitations. 164 */ 165 static unsigned int get_next_freq(struct sugov_policy *sg_policy, 166 unsigned long util, unsigned long max) 167 { 168 struct cpufreq_policy *policy = sg_policy->policy; 169 unsigned int freq = arch_scale_freq_invariant() ? 170 policy->cpuinfo.max_freq : policy->cur; 171 172 freq = (freq + (freq >> 2)) * util / max; 173 174 if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX) 175 return sg_policy->next_freq; 176 sg_policy->cached_raw_freq = freq; 177 return cpufreq_driver_resolve_freq(policy, freq); 178 } 179 180 static void sugov_get_util(struct sugov_cpu *sg_cpu) 181 { 182 struct rq *rq = cpu_rq(sg_cpu->cpu); 183 184 sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu); 185 sg_cpu->util_cfs = cpu_util_cfs(rq); 186 sg_cpu->util_dl = cpu_util_dl(rq); 187 } 188 189 static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) 190 { 191 /* 192 * Ideally we would like to set util_dl as min/guaranteed freq and 193 * util_cfs + util_dl as requested freq. However, cpufreq is not yet 194 * ready for such an interface. So, we only do the latter for now. 195 */ 196 return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max); 197 } 198 199 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time) 200 { 201 if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) { 202 if (sg_cpu->iowait_boost_pending) 203 return; 204 205 sg_cpu->iowait_boost_pending = true; 206 207 if (sg_cpu->iowait_boost) { 208 sg_cpu->iowait_boost <<= 1; 209 if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max) 210 sg_cpu->iowait_boost = sg_cpu->iowait_boost_max; 211 } else { 212 sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min; 213 } 214 } else if (sg_cpu->iowait_boost) { 215 s64 delta_ns = time - sg_cpu->last_update; 216 217 /* Clear iowait_boost if the CPU apprears to have been idle. */ 218 if (delta_ns > TICK_NSEC) { 219 sg_cpu->iowait_boost = 0; 220 sg_cpu->iowait_boost_pending = false; 221 } 222 } 223 } 224 225 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, 226 unsigned long *max) 227 { 228 unsigned int boost_util, boost_max; 229 230 if (!sg_cpu->iowait_boost) 231 return; 232 233 if (sg_cpu->iowait_boost_pending) { 234 sg_cpu->iowait_boost_pending = false; 235 } else { 236 sg_cpu->iowait_boost >>= 1; 237 if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) { 238 sg_cpu->iowait_boost = 0; 239 return; 240 } 241 } 242 243 boost_util = sg_cpu->iowait_boost; 244 boost_max = sg_cpu->iowait_boost_max; 245 246 if (*util * boost_max < *max * boost_util) { 247 *util = boost_util; 248 *max = boost_max; 249 } 250 } 251 252 #ifdef CONFIG_NO_HZ_COMMON 253 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) 254 { 255 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); 256 bool ret = idle_calls == sg_cpu->saved_idle_calls; 257 258 sg_cpu->saved_idle_calls = idle_calls; 259 return ret; 260 } 261 #else 262 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } 263 #endif /* CONFIG_NO_HZ_COMMON */ 264 265 static void sugov_update_single(struct update_util_data *hook, u64 time, 266 unsigned int flags) 267 { 268 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); 269 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 270 struct cpufreq_policy *policy = sg_policy->policy; 271 unsigned long util, max; 272 unsigned int next_f; 273 bool busy; 274 275 sugov_set_iowait_boost(sg_cpu, time); 276 sg_cpu->last_update = time; 277 278 if (!sugov_should_update_freq(sg_policy, time)) 279 return; 280 281 busy = sugov_cpu_is_busy(sg_cpu); 282 283 if (flags & SCHED_CPUFREQ_RT) { 284 next_f = policy->cpuinfo.max_freq; 285 } else { 286 sugov_get_util(sg_cpu); 287 max = sg_cpu->max; 288 util = sugov_aggregate_util(sg_cpu); 289 sugov_iowait_boost(sg_cpu, &util, &max); 290 next_f = get_next_freq(sg_policy, util, max); 291 /* 292 * Do not reduce the frequency if the CPU has not been idle 293 * recently, as the reduction is likely to be premature then. 294 */ 295 if (busy && next_f < sg_policy->next_freq) { 296 next_f = sg_policy->next_freq; 297 298 /* Reset cached freq as next_freq has changed */ 299 sg_policy->cached_raw_freq = 0; 300 } 301 } 302 sugov_update_commit(sg_policy, time, next_f); 303 } 304 305 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) 306 { 307 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 308 struct cpufreq_policy *policy = sg_policy->policy; 309 unsigned long util = 0, max = 1; 310 unsigned int j; 311 312 for_each_cpu(j, policy->cpus) { 313 struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); 314 unsigned long j_util, j_max; 315 s64 delta_ns; 316 317 /* 318 * If the CFS CPU utilization was last updated before the 319 * previous frequency update and the time elapsed between the 320 * last update of the CPU utilization and the last frequency 321 * update is long enough, reset iowait_boost and util_cfs, as 322 * they are now probably stale. However, still consider the 323 * CPU contribution if it has some DEADLINE utilization 324 * (util_dl). 325 */ 326 delta_ns = time - j_sg_cpu->last_update; 327 if (delta_ns > TICK_NSEC) { 328 j_sg_cpu->iowait_boost = 0; 329 j_sg_cpu->iowait_boost_pending = false; 330 j_sg_cpu->util_cfs = 0; 331 if (j_sg_cpu->util_dl == 0) 332 continue; 333 } 334 if (j_sg_cpu->flags & SCHED_CPUFREQ_RT) 335 return policy->cpuinfo.max_freq; 336 337 j_max = j_sg_cpu->max; 338 j_util = sugov_aggregate_util(j_sg_cpu); 339 if (j_util * max > j_max * util) { 340 util = j_util; 341 max = j_max; 342 } 343 344 sugov_iowait_boost(j_sg_cpu, &util, &max); 345 } 346 347 return get_next_freq(sg_policy, util, max); 348 } 349 350 static void sugov_update_shared(struct update_util_data *hook, u64 time, 351 unsigned int flags) 352 { 353 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); 354 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 355 unsigned int next_f; 356 357 raw_spin_lock(&sg_policy->update_lock); 358 359 sugov_get_util(sg_cpu); 360 sg_cpu->flags = flags; 361 362 sugov_set_iowait_boost(sg_cpu, time); 363 sg_cpu->last_update = time; 364 365 if (sugov_should_update_freq(sg_policy, time)) { 366 if (flags & SCHED_CPUFREQ_RT) 367 next_f = sg_policy->policy->cpuinfo.max_freq; 368 else 369 next_f = sugov_next_freq_shared(sg_cpu, time); 370 371 sugov_update_commit(sg_policy, time, next_f); 372 } 373 374 raw_spin_unlock(&sg_policy->update_lock); 375 } 376 377 static void sugov_work(struct kthread_work *work) 378 { 379 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); 380 381 mutex_lock(&sg_policy->work_lock); 382 __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq, 383 CPUFREQ_RELATION_L); 384 mutex_unlock(&sg_policy->work_lock); 385 386 sg_policy->work_in_progress = false; 387 } 388 389 static void sugov_irq_work(struct irq_work *irq_work) 390 { 391 struct sugov_policy *sg_policy; 392 393 sg_policy = container_of(irq_work, struct sugov_policy, irq_work); 394 395 /* 396 * For RT tasks, the schedutil governor shoots the frequency to maximum. 397 * Special care must be taken to ensure that this kthread doesn't result 398 * in the same behavior. 399 * 400 * This is (mostly) guaranteed by the work_in_progress flag. The flag is 401 * updated only at the end of the sugov_work() function and before that 402 * the schedutil governor rejects all other frequency scaling requests. 403 * 404 * There is a very rare case though, where the RT thread yields right 405 * after the work_in_progress flag is cleared. The effects of that are 406 * neglected for now. 407 */ 408 kthread_queue_work(&sg_policy->worker, &sg_policy->work); 409 } 410 411 /************************** sysfs interface ************************/ 412 413 static struct sugov_tunables *global_tunables; 414 static DEFINE_MUTEX(global_tunables_lock); 415 416 static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set) 417 { 418 return container_of(attr_set, struct sugov_tunables, attr_set); 419 } 420 421 static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) 422 { 423 struct sugov_tunables *tunables = to_sugov_tunables(attr_set); 424 425 return sprintf(buf, "%u\n", tunables->rate_limit_us); 426 } 427 428 static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, 429 size_t count) 430 { 431 struct sugov_tunables *tunables = to_sugov_tunables(attr_set); 432 struct sugov_policy *sg_policy; 433 unsigned int rate_limit_us; 434 435 if (kstrtouint(buf, 10, &rate_limit_us)) 436 return -EINVAL; 437 438 tunables->rate_limit_us = rate_limit_us; 439 440 list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) 441 sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC; 442 443 return count; 444 } 445 446 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us); 447 448 static struct attribute *sugov_attributes[] = { 449 &rate_limit_us.attr, 450 NULL 451 }; 452 453 static struct kobj_type sugov_tunables_ktype = { 454 .default_attrs = sugov_attributes, 455 .sysfs_ops = &governor_sysfs_ops, 456 }; 457 458 /********************** cpufreq governor interface *********************/ 459 460 static struct cpufreq_governor schedutil_gov; 461 462 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) 463 { 464 struct sugov_policy *sg_policy; 465 466 sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL); 467 if (!sg_policy) 468 return NULL; 469 470 sg_policy->policy = policy; 471 raw_spin_lock_init(&sg_policy->update_lock); 472 return sg_policy; 473 } 474 475 static void sugov_policy_free(struct sugov_policy *sg_policy) 476 { 477 kfree(sg_policy); 478 } 479 480 static int sugov_kthread_create(struct sugov_policy *sg_policy) 481 { 482 struct task_struct *thread; 483 struct sched_attr attr = { 484 .size = sizeof(struct sched_attr), 485 .sched_policy = SCHED_DEADLINE, 486 .sched_flags = SCHED_FLAG_SUGOV, 487 .sched_nice = 0, 488 .sched_priority = 0, 489 /* 490 * Fake (unused) bandwidth; workaround to "fix" 491 * priority inheritance. 492 */ 493 .sched_runtime = 1000000, 494 .sched_deadline = 10000000, 495 .sched_period = 10000000, 496 }; 497 struct cpufreq_policy *policy = sg_policy->policy; 498 int ret; 499 500 /* kthread only required for slow path */ 501 if (policy->fast_switch_enabled) 502 return 0; 503 504 kthread_init_work(&sg_policy->work, sugov_work); 505 kthread_init_worker(&sg_policy->worker); 506 thread = kthread_create(kthread_worker_fn, &sg_policy->worker, 507 "sugov:%d", 508 cpumask_first(policy->related_cpus)); 509 if (IS_ERR(thread)) { 510 pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread)); 511 return PTR_ERR(thread); 512 } 513 514 ret = sched_setattr_nocheck(thread, &attr); 515 if (ret) { 516 kthread_stop(thread); 517 pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); 518 return ret; 519 } 520 521 sg_policy->thread = thread; 522 523 /* Kthread is bound to all CPUs by default */ 524 if (!policy->dvfs_possible_from_any_cpu) 525 kthread_bind_mask(thread, policy->related_cpus); 526 527 init_irq_work(&sg_policy->irq_work, sugov_irq_work); 528 mutex_init(&sg_policy->work_lock); 529 530 wake_up_process(thread); 531 532 return 0; 533 } 534 535 static void sugov_kthread_stop(struct sugov_policy *sg_policy) 536 { 537 /* kthread only required for slow path */ 538 if (sg_policy->policy->fast_switch_enabled) 539 return; 540 541 kthread_flush_worker(&sg_policy->worker); 542 kthread_stop(sg_policy->thread); 543 mutex_destroy(&sg_policy->work_lock); 544 } 545 546 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy) 547 { 548 struct sugov_tunables *tunables; 549 550 tunables = kzalloc(sizeof(*tunables), GFP_KERNEL); 551 if (tunables) { 552 gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook); 553 if (!have_governor_per_policy()) 554 global_tunables = tunables; 555 } 556 return tunables; 557 } 558 559 static void sugov_tunables_free(struct sugov_tunables *tunables) 560 { 561 if (!have_governor_per_policy()) 562 global_tunables = NULL; 563 564 kfree(tunables); 565 } 566 567 static int sugov_init(struct cpufreq_policy *policy) 568 { 569 struct sugov_policy *sg_policy; 570 struct sugov_tunables *tunables; 571 int ret = 0; 572 573 /* State should be equivalent to EXIT */ 574 if (policy->governor_data) 575 return -EBUSY; 576 577 cpufreq_enable_fast_switch(policy); 578 579 sg_policy = sugov_policy_alloc(policy); 580 if (!sg_policy) { 581 ret = -ENOMEM; 582 goto disable_fast_switch; 583 } 584 585 ret = sugov_kthread_create(sg_policy); 586 if (ret) 587 goto free_sg_policy; 588 589 mutex_lock(&global_tunables_lock); 590 591 if (global_tunables) { 592 if (WARN_ON(have_governor_per_policy())) { 593 ret = -EINVAL; 594 goto stop_kthread; 595 } 596 policy->governor_data = sg_policy; 597 sg_policy->tunables = global_tunables; 598 599 gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook); 600 goto out; 601 } 602 603 tunables = sugov_tunables_alloc(sg_policy); 604 if (!tunables) { 605 ret = -ENOMEM; 606 goto stop_kthread; 607 } 608 609 tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy); 610 611 policy->governor_data = sg_policy; 612 sg_policy->tunables = tunables; 613 614 ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype, 615 get_governor_parent_kobj(policy), "%s", 616 schedutil_gov.name); 617 if (ret) 618 goto fail; 619 620 out: 621 mutex_unlock(&global_tunables_lock); 622 return 0; 623 624 fail: 625 policy->governor_data = NULL; 626 sugov_tunables_free(tunables); 627 628 stop_kthread: 629 sugov_kthread_stop(sg_policy); 630 631 free_sg_policy: 632 mutex_unlock(&global_tunables_lock); 633 634 sugov_policy_free(sg_policy); 635 636 disable_fast_switch: 637 cpufreq_disable_fast_switch(policy); 638 639 pr_err("initialization failed (error %d)\n", ret); 640 return ret; 641 } 642 643 static void sugov_exit(struct cpufreq_policy *policy) 644 { 645 struct sugov_policy *sg_policy = policy->governor_data; 646 struct sugov_tunables *tunables = sg_policy->tunables; 647 unsigned int count; 648 649 mutex_lock(&global_tunables_lock); 650 651 count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook); 652 policy->governor_data = NULL; 653 if (!count) 654 sugov_tunables_free(tunables); 655 656 mutex_unlock(&global_tunables_lock); 657 658 sugov_kthread_stop(sg_policy); 659 sugov_policy_free(sg_policy); 660 cpufreq_disable_fast_switch(policy); 661 } 662 663 static int sugov_start(struct cpufreq_policy *policy) 664 { 665 struct sugov_policy *sg_policy = policy->governor_data; 666 unsigned int cpu; 667 668 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; 669 sg_policy->last_freq_update_time = 0; 670 sg_policy->next_freq = UINT_MAX; 671 sg_policy->work_in_progress = false; 672 sg_policy->need_freq_update = false; 673 sg_policy->cached_raw_freq = 0; 674 675 for_each_cpu(cpu, policy->cpus) { 676 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 677 678 memset(sg_cpu, 0, sizeof(*sg_cpu)); 679 sg_cpu->cpu = cpu; 680 sg_cpu->sg_policy = sg_policy; 681 sg_cpu->flags = 0; 682 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; 683 } 684 685 for_each_cpu(cpu, policy->cpus) { 686 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 687 688 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, 689 policy_is_shared(policy) ? 690 sugov_update_shared : 691 sugov_update_single); 692 } 693 return 0; 694 } 695 696 static void sugov_stop(struct cpufreq_policy *policy) 697 { 698 struct sugov_policy *sg_policy = policy->governor_data; 699 unsigned int cpu; 700 701 for_each_cpu(cpu, policy->cpus) 702 cpufreq_remove_update_util_hook(cpu); 703 704 synchronize_sched(); 705 706 if (!policy->fast_switch_enabled) { 707 irq_work_sync(&sg_policy->irq_work); 708 kthread_cancel_work_sync(&sg_policy->work); 709 } 710 } 711 712 static void sugov_limits(struct cpufreq_policy *policy) 713 { 714 struct sugov_policy *sg_policy = policy->governor_data; 715 716 if (!policy->fast_switch_enabled) { 717 mutex_lock(&sg_policy->work_lock); 718 cpufreq_policy_apply_limits(policy); 719 mutex_unlock(&sg_policy->work_lock); 720 } 721 722 sg_policy->need_freq_update = true; 723 } 724 725 static struct cpufreq_governor schedutil_gov = { 726 .name = "schedutil", 727 .owner = THIS_MODULE, 728 .dynamic_switching = true, 729 .init = sugov_init, 730 .exit = sugov_exit, 731 .start = sugov_start, 732 .stop = sugov_stop, 733 .limits = sugov_limits, 734 }; 735 736 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL 737 struct cpufreq_governor *cpufreq_default_governor(void) 738 { 739 return &schedutil_gov; 740 } 741 #endif 742 743 static int __init sugov_register(void) 744 { 745 return cpufreq_register_governor(&schedutil_gov); 746 } 747 fs_initcall(sugov_register); 748