// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct			irq_work irq_work;
	struct			kthread_work work;
	struct			mutex work_lock;
	struct			kthread_worker worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_fast_switch() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->need_freq_update))
		return true;

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
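
/*
 * Example of the rate limiting above (illustrative numbers, not taken from
 * this file): if rate_limit_us is, say, 2000 us, sugov_start() computes
 * freq_update_delay_ns = 2000 * NSEC_PER_USEC = 2,000,000 ns.  An update
 * arriving 1.5 ms after the last frequency change is then dropped here
 * (delta_ns < freq_update_delay_ns), unless need_freq_update forces a
 * re-evaluation, e.g. after sugov_limits().
 */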

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!next_freq)
		return;

	policy->cur = next_freq;
	trace_cpu_frequency(next_freq, smp_processor_id());
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
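
/*
 * Worked example for the formula above (hypothetical numbers, assuming
 * frequency-invariant utilization): with policy->cpuinfo.max_freq = 2000000
 * kHz, util = 512 and max = 1024, map_util_freq() yields the raw frequency
 * 1.25 * 2000000 * 512 / 1024 = 1250000 kHz; cpufreq_driver_resolve_freq()
 * then picks the lowest OPP at or above that value, e.g. 1300000 kHz on a
 * platform whose frequency table has no exact 1250000 kHz step.
 */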

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilization are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal-time. These latter
 * are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters and gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, which can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
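
/*
 * Worked example (hypothetical numbers): on a CPU with max = 1024, take
 * util_cfs = 300, cpu_util_rt() = 50, cpu_util_dl() = 100, cpu_util_irq() = 64
 * and cpu_bw_dl() = 100.  For FREQUENCY_UTIL (and no uclamp effect):
 * util = 300 + 50 = 350; the saturation check 350 + 100 < 1024 passes;
 * scale_irq_capacity() gives 350 * (1024 - 64) / 1024 = 328, plus irq = 392;
 * adding the DEADLINE bandwidth yields 392 + 100 = 492, well below max.
 */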

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
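
/*
 * Illustration of the doubling above (assuming the usual SCHED_CAPACITY_SCALE
 * of 1024): IOWAIT_BOOST_MIN = 1024 / 8 = 128, so a task waking from IO at
 * least once per tick ramps the boost through 128, 256, 512 and finally 1024
 * (the utilization of the maximum OPP), after which it is clamped by the
 * min_t() above.
 */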

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it's instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick also has
 * its IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative on tasks that do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return util;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return util;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return util;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
}
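
/*
 * Decay example (hypothetical numbers): a boost that reached 1024 and is then
 * no longer refreshed halves on each update: 1024 -> 512 -> 256 -> 128 -> 0
 * (dropped once it falls below IOWAIT_BOOST_MIN).  The conversion above puts
 * the boost on the capacity scale of the CPU: with iowait_boost = 512 and
 * max = 1024 the boost is 512, while on a CPU with max = 512 it would be 256.
 */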

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->need_freq_update = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (busy && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
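
/*
 * The cross multiplication above selects the CPU with the highest util / max
 * ratio without doing a division.  For instance (hypothetical numbers), with
 * CPU A at util = 400, max = 1024 and CPU B at util = 300, max = 512:
 * 300 * 1024 = 307200 > 512 * 400 = 204800, so CPU B's (util, max) pair is
 * kept and drives the shared policy's frequency.
 */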

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where, if
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * below, we may miss queueing the new update.
	 *
	 * Note: If a work was queued after the update_lock is released,
	 * sugov_work() will just be called again by kthread_work code; and the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
};
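
/*
 * Usage sketch (the exact path depends on whether the governor is per-policy):
 * with per-policy tunables, rate_limit_us is typically exposed as
 * /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us, so
 * writing e.g. "2000" into that file sets a 2 ms minimum delay between
 * frequency updates for every sugov_policy attached to these tunables.
 */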

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	=  1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
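
/*
 * Note on the fake parameters above (interpretation, not stated in this
 * file): sched_runtime = 1000000 ns with sched_deadline = sched_period =
 * 10000000 ns describes a nominal 1 ms / 10 ms (10%) reservation, but since
 * the comment calls the bandwidth "fake (unused)" and the kthread carries
 * SCHED_FLAG_SUGOV, these numbers mainly serve to make the attr well formed
 * rather than to enforce an actual DL budget.
 */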

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}


static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time	= 0;
	sg_policy->next_freq			= 0;
	sg_policy->work_in_progress		= false;
	sg_policy->need_freq_update		= false;
	sg_policy->cached_raw_freq		= 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu			= cpu;
		sg_cpu->sg_policy		= sg_policy;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.dynamic_switching	= true,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif