// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool limits_changed;
        bool need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
        unsigned int cpu;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        u64 last_update;

        unsigned long util;
        unsigned long bw_dl;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, the subsequent
         * frequency update may not happen on fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * This is needed on the slow switching platforms too to prevent CPUs
         * going offline from leaving stale IRQ work items behind.
         */
        if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->need_freq_update)
                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
        else if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        util = map_util_perf(util);
        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}
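
/*
 * Worked example of the mapping above (illustrative numbers, not from any
 * particular platform): on a frequency-invariant system with
 * policy->cpuinfo.max_freq = 2000000 kHz, util = 512 and max = 1024,
 * map_util_perf() scales util to 512 + 512/4 = 640, and map_util_freq()
 * yields
 *
 *     freq = 2000000 * 640 / 1024 = 1250000 kHz
 *
 * i.e. next_freq = 1.25 * max_freq * util / max. If the (hypothetical) OPP
 * table is { 500000, 1000000, 1400000, 2000000 } kHz,
 * cpufreq_driver_resolve_freq() then picks 1400000 kHz, the lowest
 * supported frequency at or above the raw value.
 */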

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
        struct rq *rq = cpu_rq(sg_cpu->cpu);

        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
                                          FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
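
/*
 * Example of the doubling policy above, with SCHED_CAPACITY_SCALE = 1024
 * and therefore IOWAIT_BOOST_MIN = 128: a task waking from IO at least once
 * per tick walks the boost through 128 -> 256 -> 512 -> 1024, saturating at
 * the utilization of the maximum OPP after four successive requests.
 */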

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it's instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks which frequently wait on IO,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned long max_cap)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
        boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
        if (sg_cpu->util < boost)
                sg_cpu->util = boost;
}
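
/*
 * Decay example for the function above, again with
 * SCHED_CAPACITY_SCALE = 1024: with no new boost requests, successive
 * updates halve iowait_boost 1024 -> 512 -> 256 -> 128 -> 0 (the last step
 * falls below IOWAIT_BOOST_MIN and clears the boost entirely). The shift
 * converts scales: on a CPU with max_cap = 512, an iowait_boost of 512
 * contributes (512 * 512) >> SCHED_CAPACITY_SHIFT = 256 in capacity scale.
 */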

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
                                              u64 time, unsigned long max_cap,
                                              unsigned int flags)
{
        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
                return false;

        sugov_get_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time, max_cap);

        return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int cached_freq = sg_policy->cached_raw_freq;
        unsigned long max_cap;
        unsigned int next_f;

        max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

        if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
                return;

        next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         *
         * Except when the rq is capped by uclamp_max.
         */
        if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
            sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
            !sg_policy->need_freq_update) {
                next_f = sg_policy->next_freq;

                /* Restore cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = cached_freq;
        }

        if (!sugov_update_next_freq(sg_policy, time, next_f))
                return;

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                cpufreq_driver_fast_switch(sg_policy->policy, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        unsigned long prev_util = sg_cpu->util;
        unsigned long max_cap;

        /*
         * Fall back to the "frequency" path if frequency invariance is not
         * supported, because the direct mapping between the utilization and
         * the performance levels depends on the frequency invariance.
         */
        if (!arch_scale_freq_invariant()) {
                sugov_update_single_freq(hook, time, flags);
                return;
        }

        max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

        if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
                return;

        /*
         * Do not reduce the target performance level if the CPU has not been
         * idle recently, as the reduction is likely to be premature then.
         *
         * Except when the rq is capped by uclamp_max.
         */
        if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
            sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;

        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
                                   map_util_perf(sg_cpu->util), max_cap);

        sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max_cap;
        unsigned int j;

        max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);

                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time, max_cap);

                util = max(j_sg_cpu->util, util);
        }

        return get_next_freq(sg_policy, util, max_cap);
}
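
/*
 * Example of the aggregation above: on a shared policy spanning CPUs
 * {0, 1, 2, 3} with utilizations {300, 650, 120, 400}, the loop selects
 * util = 650, so the whole cluster runs at the frequency its busiest CPU
 * requires; CPUs in one clock domain cannot be scaled independently.
 */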

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (!sugov_update_next_freq(sg_policy, time, next_f))
                        goto unlock;

                if (sg_policy->policy->fast_switch_enabled)
                        cpufreq_driver_fast_switch(sg_policy->policy, next_f);
                else
                        sugov_deferred_update(sg_policy);
        }
unlock:
        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to
         * false below; without the lock we could miss queueing the new
         * update.
         *
         * Note: If a work item was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code;
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
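
/*
 * For reference, the complete slow path runs as: a scheduler update calls
 * sugov_deferred_update() -> irq_work_queue() -> sugov_irq_work() ->
 * kthread_queue_work() -> sugov_work(), which finally calls
 * __cpufreq_driver_target() from the sugov kthread, in a context where the
 * driver is allowed to sleep.
 */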

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}
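
/*
 * Usage example for the tunable above (per-policy tunables; the policy
 * number is illustrative):
 *
 *     echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * sets freq_update_delay_ns to 2000 * NSEC_PER_USEC = 2,000,000 ns, i.e.
 * sugov_should_update_freq() will then allow at most one frequency update
 * every 2 ms on policy0.
 */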

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
        struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

        kfree(to_sugov_tunables(attr_set));
}

static const struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
        .release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size = sizeof(struct sched_attr),
                .sched_policy = SCHED_DEADLINE,
                .sched_flags = SCHED_FLAG_SUGOV,
                .sched_nice = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime = 1000000,
                .sched_deadline = 10000000,
                .sched_period = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}
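
/*
 * Note on the function above: the worker runs as SCHED_DEADLINE so that
 * frequency updates are served promptly even when the CPU is saturated
 * with RT/DL work. SCHED_FLAG_SUGOV is a kernel-internal flag (hence
 * sched_setattr_nocheck()) marking the thread as a "special" DL task that
 * is exempt from deadline admission control and bandwidth accounting.
 */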

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_clear_global_tunables(void)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_clear_global_tunables();

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_clear_global_tunables();

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->cached_raw_freq = 0;

        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        if (policy_is_shared(policy))
                uu = sugov_update_shared;
        else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
                uu = sugov_update_single_perf;
        else
                uu = sugov_update_single_freq;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }
        return 0;
}
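
/*
 * The callback selection above gives three flavors of update path:
 * sugov_update_shared() for multi-CPU policies (utilization must be
 * aggregated under update_lock), sugov_update_single_perf() for
 * fast-switching drivers that implement adjust_perf (direct
 * utilization-to-performance requests), and sugov_update_single_freq()
 * for everything else.
 */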

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
        .name = "schedutil",
        .owner = THIS_MODULE,
        .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .init = sugov_init,
        .exit = sugov_exit,
        .start = sugov_start,
        .stop = sugov_stop,
        .limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
        rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif