// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_fast_switch() call may not work for the fast switching
	 * platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (!sg_policy->need_freq_update) {
		if (sg_policy->next_freq == next_freq)
			return false;
	} else {
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	}

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	if (sugov_update_next_freq(sg_policy, time, next_freq))
		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
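/*
 * Worked example (hypothetical numbers, for illustration only): on a
 * frequency-invariant platform with policy->cpuinfo.max_freq = 2000000 kHz,
 * util = 512 and max = 1024, map_util_freq() yields
 * 1.25 * 2000000 * 512 / 1024 = 1250000 kHz as the raw target, which
 * cpufreq_driver_resolve_freq() then resolves to the lowest driver-supported
 * frequency at or above that value, within the policy limits.
 */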

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters and gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * On the other hand, for energy computation we need the estimated
	 * running time, so include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *               max - irq
	 *   U' = irq + ----------- * U
	 *                  max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
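/*
 * Worked example for the IRQ scaling above (hypothetical numbers, for
 * illustration only): with max = 1024, irq = 128 and a task utilization
 * U = 400, scale_irq_capacity() gives (1024 - 128) * 400 / 1024 = 350,
 * and adding the irq contribution back yields U' = 350 + 128 = 478.
 */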

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Update the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from IOWAIT_BOOST_MIN.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure the boost doubles only once per request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
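/*
 * Illustration (assuming SCHED_CAPACITY_SCALE == 1024, so IOWAIT_BOOST_MIN
 * == 128): four back-to-back IO wakeups, each within a tick of the previous
 * one, ramp the boost as 128 -> 256 -> 512 -> 1024, i.e. up to full capacity.
 * A gap longer than a tick resets the ramp back to 128, or clears the boost
 * entirely if no new boost was requested.
 */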

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and is decreased by this function each time an
 * increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that wait on IO frequently, while
 * being more conservative on tasks which do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return util;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return util;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return util;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
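/*
 * Note on the heuristic above: sugov_cpu_is_busy() compares the NOHZ
 * idle-call counter against the value saved at the previous update. If the
 * counter has not moved, the CPU has not entered idle since then and is
 * treated as busy, in which case sugov_update_single() below avoids lowering
 * the frequency.
 */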

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->limits_changed = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	unsigned int cached_freq = sg_policy->cached_raw_freq;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
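/*
 * Note on the comparison above: "j_util * max > j_max * util" is the
 * cross-multiplied form of "j_util / j_max > util / max", i.e. it picks the
 * CPU with the highest relative utilization in the policy without doing any
 * division. For example, with (j_util, j_max) = (300, 512) against a current
 * best of (util, max) = (500, 1024), 300 * 1024 = 307200 > 512 * 500 = 256000,
 * so the 300/512 CPU is selected.
 */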

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where, if
	 * sg_policy->next_freq were read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * below, we could miss queueing the new update.
	 *
	 * Note: If a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; and
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
};
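/*
 * Usage note (the exact path depends on the kernel configuration and is given
 * only as a typical example): with per-policy governor tunables, the attribute
 * defined above is normally exposed as
 * /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us.
 * Writing a value in microseconds to it updates freq_update_delay_ns for every
 * policy attached to that tunables set, as done in rate_limit_us_store() above.
 */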

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq		= 0;
	sg_policy->work_in_progress	= false;
	sg_policy->limits_changed	= false;
	sg_policy->cached_raw_freq	= 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu		= cpu;
		sg_cpu->sg_policy	= sg_policy;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}

}
#endif