// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct			irq_work irq_work;
	struct			kthread_work work;
	struct			mutex work_lock;
	struct			kthread_worker worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		util;
	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_fast_switch() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

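	/* Rate limit: allow at most one frequency update per rate_limit_us. */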
	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	if (sugov_update_next_freq(sg_policy, time, next_freq))
		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
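 *
 * For example (illustrative numbers only): with frequency-invariant
 * util = 512, max = 1024 and max_freq = 2000000 kHz, the raw value is
 * 1.25 * 2000000 * 512 / 1024 = 1250000 kHz, which is then resolved to the
 * lowest driver-supported frequency at or above it.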
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

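	/*
	 * Skip cpufreq_driver_resolve_freq() if neither the raw frequency nor
	 * the policy limits have changed since the last evaluation.
	 */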
	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters; it gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
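	 *
	 * e.g. (illustrative numbers): irq = 256, max = 1024 and U = 512 give
	 * U' = 256 + (768 / 1024) * 512 = 640.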
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
					  FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
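 *
 * For example, with SCHED_CAPACITY_SCALE = 1024 (so IOWAIT_BOOST_MIN = 128),
 * successive per-tick IO wakeups walk the boost through 128, 256, 512 and
 * finally 1024, the utilization of the maximum OPP.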
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative on tasks that do only sporadic IO operations.
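 *
 * For example, a boost that reached 1024 decays through 512, 256 and 128 on
 * successive updates without new IO wakeups, and is then cleared once it
 * would drop below IOWAIT_BOOST_MIN.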
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned int flags)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned int next_f;

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

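	/*
	 * Hand the request to the driver directly: the DL bandwidth (plus the
	 * usual 25% headroom added by map_util_perf()) is the minimum
	 * performance level, the boosted utilization is the target, and
	 * sg_cpu->max is the capacity both are scaled against.
	 */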
	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), sg_cpu->max);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time);
		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;

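		/*
		 * Keep the (util, max) pair with the highest util/max ratio;
		 * the cross-multiplication avoids a division in the loop.
		 */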
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 *
	 * Note: If a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; and
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	=  1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time	= 0;
	sg_policy->next_freq			= 0;
	sg_policy->work_in_progress		= false;
	sg_policy->limits_changed		= false;
	sg_policy->cached_raw_freq		= 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu			= cpu;
		sg_cpu->sg_policy		= sg_policy;
	}

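	/*
	 * Pick the update callback: shared policies need the cross-CPU
	 * aggregation path, while single-CPU policies use the direct
	 * performance interface if the fast-switching driver provides one,
	 * and the plain frequency path otherwise.
	 */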
	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				  struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif