// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work         irq_work;
        struct kthread_work     work;
        struct mutex            work_lock;
        struct kthread_worker   worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        u64                     last_update;

        unsigned long           bw_dl;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_fast_switch() call may not work on fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->need_freq_update))
                return true;

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

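/*
 * Example with illustrative numbers: with rate_limit_us set to 2000,
 * freq_update_delay_ns is 2,000,000, so an update arriving 1.5 ms after the
 * last frequency change is skipped here, unless need_freq_update has been
 * set in the meantime (e.g. by sugov_limits() or ignore_dl_rate_limit()).
 */
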
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
        if (!next_freq)
                return;

        policy->cur = next_freq;
        trace_cpu_frequency(next_freq, smp_processor_id());
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

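/*
 * Worked example with illustrative numbers: with frequency-invariant
 * utilization, util = 614 and max = 1024 give util / max of roughly 0.6,
 * so the raw request is 1.25 * 0.6 = 0.75 of cpuinfo.max_freq;
 * cpufreq_driver_resolve_freq() then picks the lowest supported frequency
 * at or above that value, within the policy limits.
 */
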
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *      cpu_util_{cfs,rt,dl,irq}()
 *      cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number, OTOH, is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
                                  unsigned long max, enum schedutil_type type)
{
        unsigned long dl_util, util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt))
                return max;

        /*
         * Early check to see if IRQ/steal time saturates the CPU, can be
         * because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;

        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized) we can directly add them
         * to obtain the CPU's actual utilization.
         */
        util = util_cfs;
        util += cpu_util_rt(rq);

        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent part
         * of this sum because we want to use cpu_bw_dl() later on, but we need
         * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
         * that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;

        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;

        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
}

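/*
 * Worked example with illustrative values: on a CPU with max = 1024,
 * irq = 128 and a CFS+RT utilization U = 512, the scaled result is
 * U' = 128 + 512 * (1024 - 128) / 1024 = 128 + 448 = 576, i.e. the task
 * utilization is compressed into the time left over after IRQ/steal time.
 */
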
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util = cpu_util_cfs(rq);
        unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

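/*
 * Illustrative sequence, assuming SCHED_CAPACITY_SCALE is 1024 (so
 * IOWAIT_BOOST_MIN is 128): four IO wakeups arriving less than a tick
 * apart raise iowait_boost through 128, 256, 512 and 1024, at which
 * point it is capped at the full capacity scale.
 */
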
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks which frequently wait on IO,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
}

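/*
 * Illustrative decay, again assuming SCHED_CAPACITY_SCALE is 1024: a boost
 * of 1024 with no further IO wakeups is halved on each subsequent call that
 * sees no pending request (512, 256, 128) and is then cleared once it would
 * drop below IOWAIT_BOOST_MIN.
 */
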
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->need_freq_update = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        busy = sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

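/*
 * The loop in sugov_next_freq_shared() picks the CPU with the highest
 * util / max ratio; comparing j_util * max > j_max * util is equivalent to
 * comparing the ratios but avoids the division. Illustrative example:
 * j_util = 300, j_max = 512 (ratio ~0.59) wins over util = 500, max = 1024
 * (ratio ~0.49) because 300 * 1024 > 512 * 500.
 */
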
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to
         * false here; without the lock we could miss queueing the new update.
         *
         * Note: If a work was queued after the update_lock is released,
         * sugov_work() will just be called again by kthread_work code; and the
         * request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};

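/*
 * With per-policy tunables, rate_limit_us is typically exposed as, e.g.,
 * /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us (the exact
 * path depends on the platform); writing a value in microseconds updates
 * freq_update_delay_ns for every policy attached to the tunables set, as
 * done in rate_limit_us_store() above.
 */
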
/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = 1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->need_freq_update = true;
}

struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
        .dynamic_switching      = true,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
        .stop                   = sugov_stop,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
        mutex_lock(&sched_energy_mutex);
        sched_energy_update = true;
        rebuild_sched_domains();
        sched_energy_update = false;
        mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif