1108c35a9SDaniel Lezcano // SPDX-License-Identifier: GPL-2.0
29bdcb44eSRafael J. Wysocki /*
39bdcb44eSRafael J. Wysocki * CPUFreq governor based on scheduler-provided CPU utilization data.
49bdcb44eSRafael J. Wysocki *
59bdcb44eSRafael J. Wysocki * Copyright (C) 2016, Intel Corporation
69bdcb44eSRafael J. Wysocki * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
79bdcb44eSRafael J. Wysocki */
89bdcb44eSRafael J. Wysocki
99eca544bSRafael J. Wysocki #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
109eca544bSRafael J. Wysocki
/*
 * Tunables exposed through sysfs for one gov_attr_set; may be shared by
 * several policies when global tunables are in use.
 */
struct sugov_tunables {
	struct gov_attr_set attr_set;	/* sysfs kobject/attribute plumbing */
	unsigned int rate_limit_us;	/* minimum time between freq updates */
};
159bdcb44eSRafael J. Wysocki
/* Per-cpufreq-policy governor state. */
struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;		/* entry in attr_set->policy_list */

	raw_spinlock_t update_lock;		/* serializes shared-policy updates */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;		/* rate_limit_us converted to ns */
	unsigned int next_freq;
	unsigned int cached_raw_freq;		/* raw freq before driver resolution */

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;			/* kthread update already queued */

	bool limits_changed;			/* policy limits changed; force update */
	bool need_freq_update;
};
399bdcb44eSRafael J. Wysocki
/* Per-CPU governor state. */
struct sugov_cpu {
	struct update_util_data update_util;	/* scheduler update hook */
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;	/* boost increase requested this period */
	unsigned int iowait_boost;	/* current boost, SCHED_CAPACITY scale */
	u64 last_update;

	unsigned long util;		/* last computed effective utilization */
	unsigned long bw_dl;		/* deadline bandwidth at last update */

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};
579bdcb44eSRafael J. Wysocki
/* One governor-state instance per possible CPU. */
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
599bdcb44eSRafael J. Wysocki
609bdcb44eSRafael J. Wysocki /************************ Governor internals ***********************/
619bdcb44eSRafael J. Wysocki
sugov_should_update_freq(struct sugov_policy * sg_policy,u64 time)629bdcb44eSRafael J. Wysocki static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
639bdcb44eSRafael J. Wysocki {
649bdcb44eSRafael J. Wysocki s64 delta_ns;
659bdcb44eSRafael J. Wysocki
66674e7541SViresh Kumar /*
67674e7541SViresh Kumar * Since cpufreq_update_util() is called with rq->lock held for
6897fb7a0aSIngo Molnar * the @target_cpu, our per-CPU data is fully serialized.
69674e7541SViresh Kumar *
7097fb7a0aSIngo Molnar * However, drivers cannot in general deal with cross-CPU
71674e7541SViresh Kumar * requests, so while get_next_freq() will work, our
72c49cbc19SViresh Kumar * sugov_update_commit() call may not for the fast switching platforms.
73674e7541SViresh Kumar *
74674e7541SViresh Kumar * Hence stop here for remote requests if they aren't supported
75674e7541SViresh Kumar * by the hardware, as calculating the frequency is pointless if
76674e7541SViresh Kumar * we cannot in fact act on it.
77c49cbc19SViresh Kumar *
7885572c2cSRafael J. Wysocki * This is needed on the slow switching platforms too to prevent CPUs
7985572c2cSRafael J. Wysocki * going offline from leaving stale IRQ work items behind.
80674e7541SViresh Kumar */
8185572c2cSRafael J. Wysocki if (!cpufreq_this_cpu_can_update(sg_policy->policy))
82674e7541SViresh Kumar return false;
83674e7541SViresh Kumar
84600f5badSViresh Kumar if (unlikely(sg_policy->limits_changed)) {
85600f5badSViresh Kumar sg_policy->limits_changed = false;
86600f5badSViresh Kumar sg_policy->need_freq_update = true;
879bdcb44eSRafael J. Wysocki return true;
88600f5badSViresh Kumar }
899bdcb44eSRafael J. Wysocki
909bdcb44eSRafael J. Wysocki delta_ns = time - sg_policy->last_freq_update_time;
9197fb7a0aSIngo Molnar
929bdcb44eSRafael J. Wysocki return delta_ns >= sg_policy->freq_update_delay_ns;
939bdcb44eSRafael J. Wysocki }
949bdcb44eSRafael J. Wysocki
sugov_update_next_freq(struct sugov_policy * sg_policy,u64 time,unsigned int next_freq)95a61dec74SRafael J. Wysocki static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
969bdcb44eSRafael J. Wysocki unsigned int next_freq)
979bdcb44eSRafael J. Wysocki {
9890ac908aSRafael J. Wysocki if (sg_policy->need_freq_update)
9923a88185SViresh Kumar sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
10090ac908aSRafael J. Wysocki else if (sg_policy->next_freq == next_freq)
10190ac908aSRafael J. Wysocki return false;
10238d4ea22SRafael J. Wysocki
1039bdcb44eSRafael J. Wysocki sg_policy->next_freq = next_freq;
1049bdcb44eSRafael J. Wysocki sg_policy->last_freq_update_time = time;
1059bdcb44eSRafael J. Wysocki
106a61dec74SRafael J. Wysocki return true;
107a61dec74SRafael J. Wysocki }
108a61dec74SRafael J. Wysocki
sugov_deferred_update(struct sugov_policy * sg_policy)109389e4ecfSYue Hu static void sugov_deferred_update(struct sugov_policy *sg_policy)
110a61dec74SRafael J. Wysocki {
111a61dec74SRafael J. Wysocki if (!sg_policy->work_in_progress) {
1129bdcb44eSRafael J. Wysocki sg_policy->work_in_progress = true;
1139bdcb44eSRafael J. Wysocki irq_work_queue(&sg_policy->irq_work);
1149bdcb44eSRafael J. Wysocki }
1159bdcb44eSRafael J. Wysocki }
1169bdcb44eSRafael J. Wysocki
1179bdcb44eSRafael J. Wysocki /**
1189bdcb44eSRafael J. Wysocki * get_next_freq - Compute a new frequency for a given cpufreq policy.
119655cb1ebSViresh Kumar * @sg_policy: schedutil policy object to compute the new frequency for.
1209bdcb44eSRafael J. Wysocki * @util: Current CPU utilization.
1219bdcb44eSRafael J. Wysocki * @max: CPU capacity.
1229bdcb44eSRafael J. Wysocki *
1239bdcb44eSRafael J. Wysocki * If the utilization is frequency-invariant, choose the new frequency to be
1249bdcb44eSRafael J. Wysocki * proportional to it, that is
1259bdcb44eSRafael J. Wysocki *
1269bdcb44eSRafael J. Wysocki * next_freq = C * max_freq * util / max
1279bdcb44eSRafael J. Wysocki *
1289bdcb44eSRafael J. Wysocki * Otherwise, approximate the would-be frequency-invariant utilization by
1299bdcb44eSRafael J. Wysocki * util_raw * (curr_freq / max_freq) which leads to
1309bdcb44eSRafael J. Wysocki *
1319bdcb44eSRafael J. Wysocki * next_freq = C * curr_freq * util_raw / max
1329bdcb44eSRafael J. Wysocki *
1339bdcb44eSRafael J. Wysocki * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
1345cbea469SSteve Muckle *
1355cbea469SSteve Muckle * The lowest driver-supported frequency which is equal or greater than the raw
1365cbea469SSteve Muckle * next_freq (as calculated above) is returned, subject to policy min/max and
1375cbea469SSteve Muckle * cpufreq driver limitations.
1389bdcb44eSRafael J. Wysocki */
get_next_freq(struct sugov_policy * sg_policy,unsigned long util,unsigned long max)139655cb1ebSViresh Kumar static unsigned int get_next_freq(struct sugov_policy *sg_policy,
140655cb1ebSViresh Kumar unsigned long util, unsigned long max)
1419bdcb44eSRafael J. Wysocki {
1425cbea469SSteve Muckle struct cpufreq_policy *policy = sg_policy->policy;
1439bdcb44eSRafael J. Wysocki unsigned int freq = arch_scale_freq_invariant() ?
1449bdcb44eSRafael J. Wysocki policy->cpuinfo.max_freq : policy->cur;
1459bdcb44eSRafael J. Wysocki
1468f1b971bSLukasz Luba util = map_util_perf(util);
147938e5e4bSQuentin Perret freq = map_util_freq(util, freq, max);
1485cbea469SSteve Muckle
149ecd28842SViresh Kumar if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
1505cbea469SSteve Muckle return sg_policy->next_freq;
151ecd28842SViresh Kumar
1526c4f0fa6SViresh Kumar sg_policy->cached_raw_freq = freq;
1535cbea469SSteve Muckle return cpufreq_driver_resolve_freq(policy, freq);
1549bdcb44eSRafael J. Wysocki }
1559bdcb44eSRafael J. Wysocki
sugov_get_util(struct sugov_cpu * sg_cpu)156ca6827deSRafael J. Wysocki static void sugov_get_util(struct sugov_cpu *sg_cpu)
157938e5e4bSQuentin Perret {
1587d0583cfSDietmar Eggemann unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
159938e5e4bSQuentin Perret struct rq *rq = cpu_rq(sg_cpu->cpu);
160938e5e4bSQuentin Perret
161938e5e4bSQuentin Perret sg_cpu->bw_dl = cpu_bw_dl(rq);
1627d0583cfSDietmar Eggemann sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
163ca6827deSRafael J. Wysocki FREQUENCY_UTIL, NULL);
16458919e83SRafael J. Wysocki }
16558919e83SRafael J. Wysocki
166fd7d5287SPatrick Bellasi /**
167fd7d5287SPatrick Bellasi * sugov_iowait_reset() - Reset the IO boost status of a CPU.
168fd7d5287SPatrick Bellasi * @sg_cpu: the sugov data for the CPU to boost
169fd7d5287SPatrick Bellasi * @time: the update time from the caller
170fd7d5287SPatrick Bellasi * @set_iowait_boost: true if an IO boost has been requested
171fd7d5287SPatrick Bellasi *
172fd7d5287SPatrick Bellasi * The IO wait boost of a task is disabled after a tick since the last update
173fd7d5287SPatrick Bellasi * of a CPU. If a new IO wait boost is requested after more then a tick, then
1749eca544bSRafael J. Wysocki * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
1759eca544bSRafael J. Wysocki * efficiency by ignoring sporadic wakeups from IO.
176fd7d5287SPatrick Bellasi */
sugov_iowait_reset(struct sugov_cpu * sg_cpu,u64 time,bool set_iowait_boost)177fd7d5287SPatrick Bellasi static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
178fd7d5287SPatrick Bellasi bool set_iowait_boost)
17921ca6d2cSRafael J. Wysocki {
180295f1a99SPatrick Bellasi s64 delta_ns = time - sg_cpu->last_update;
181295f1a99SPatrick Bellasi
182fd7d5287SPatrick Bellasi /* Reset boost only if a tick has elapsed since last request */
183fd7d5287SPatrick Bellasi if (delta_ns <= TICK_NSEC)
184fd7d5287SPatrick Bellasi return false;
185fd7d5287SPatrick Bellasi
1869eca544bSRafael J. Wysocki sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
187fd7d5287SPatrick Bellasi sg_cpu->iowait_boost_pending = set_iowait_boost;
188fd7d5287SPatrick Bellasi
189fd7d5287SPatrick Bellasi return true;
190295f1a99SPatrick Bellasi }
191295f1a99SPatrick Bellasi
192fd7d5287SPatrick Bellasi /**
193fd7d5287SPatrick Bellasi * sugov_iowait_boost() - Updates the IO boost status of a CPU.
194fd7d5287SPatrick Bellasi * @sg_cpu: the sugov data for the CPU to boost
195fd7d5287SPatrick Bellasi * @time: the update time from the caller
196fd7d5287SPatrick Bellasi * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
197fd7d5287SPatrick Bellasi *
198fd7d5287SPatrick Bellasi * Each time a task wakes up after an IO operation, the CPU utilization can be
199fd7d5287SPatrick Bellasi * boosted to a certain utilization which doubles at each "frequent and
2009eca544bSRafael J. Wysocki * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
2019eca544bSRafael J. Wysocki * of the maximum OPP.
2029eca544bSRafael J. Wysocki *
203fd7d5287SPatrick Bellasi * To keep doubling, an IO boost has to be requested at least once per tick,
204fd7d5287SPatrick Bellasi * otherwise we restart from the utilization of the minimum OPP.
205fd7d5287SPatrick Bellasi */
sugov_iowait_boost(struct sugov_cpu * sg_cpu,u64 time,unsigned int flags)206fd7d5287SPatrick Bellasi static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
207fd7d5287SPatrick Bellasi unsigned int flags)
208fd7d5287SPatrick Bellasi {
209fd7d5287SPatrick Bellasi bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
210fd7d5287SPatrick Bellasi
211fd7d5287SPatrick Bellasi /* Reset boost if the CPU appears to have been idle enough */
212fd7d5287SPatrick Bellasi if (sg_cpu->iowait_boost &&
213fd7d5287SPatrick Bellasi sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
214a5a0809bSJoel Fernandes return;
215a5a0809bSJoel Fernandes
216fd7d5287SPatrick Bellasi /* Boost only tasks waking up after IO */
217fd7d5287SPatrick Bellasi if (!set_iowait_boost)
218fd7d5287SPatrick Bellasi return;
219fd7d5287SPatrick Bellasi
220fd7d5287SPatrick Bellasi /* Ensure boost doubles only one time at each request */
221fd7d5287SPatrick Bellasi if (sg_cpu->iowait_boost_pending)
222fd7d5287SPatrick Bellasi return;
223a5a0809bSJoel Fernandes sg_cpu->iowait_boost_pending = true;
224a5a0809bSJoel Fernandes
225fd7d5287SPatrick Bellasi /* Double the boost at each request */
226a5a0809bSJoel Fernandes if (sg_cpu->iowait_boost) {
227a23314e9SPeter Zijlstra sg_cpu->iowait_boost =
228a23314e9SPeter Zijlstra min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
229fd7d5287SPatrick Bellasi return;
23021ca6d2cSRafael J. Wysocki }
23121ca6d2cSRafael J. Wysocki
232fd7d5287SPatrick Bellasi /* First wakeup after IO: start with minimum boost */
2339eca544bSRafael J. Wysocki sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
23421ca6d2cSRafael J. Wysocki }
23521ca6d2cSRafael J. Wysocki
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task which woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it's instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has also
 * its IO boost utilization reset.
 *
 * This mechanism is designed to boost high frequently IO waiting tasks, while
 * being more conservative on tasks which does sporadic IO operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned long max_cap)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		/* Below the minimum the boost is dropped entirely. */
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
	/* The boost only ever raises the utilization, never lowers it. */
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}
28921ca6d2cSRafael J. Wysocki
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Return true if the CPU has not entered the idle loop since the previous
 * call: the nohz idle-calls counter is unchanged.  The saved counter is
 * refreshed as a side effect on every invocation.
 */
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
/* Without NO_HZ idle-call accounting the CPU is never considered busy. */
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
302b7eaf1aaSRafael J. Wysocki
303e97a90f7SClaudio Scordino /*
304e97a90f7SClaudio Scordino * Make sugov_should_update_freq() ignore the rate limit when DL
305e97a90f7SClaudio Scordino * has increased the utilization.
306e97a90f7SClaudio Scordino */
ignore_dl_rate_limit(struct sugov_cpu * sg_cpu)30771f1309fSYue Hu static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
308e97a90f7SClaudio Scordino {
3098cc90515SVincent Guittot if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
31071f1309fSYue Hu sg_cpu->sg_policy->limits_changed = true;
311e97a90f7SClaudio Scordino }
312e97a90f7SClaudio Scordino
/*
 * Common prologue of the single-CPU update paths: account the IO-wait boost
 * request, check the rate limit, and refresh this CPU's utilization.
 * Returns true when the caller should proceed with a frequency update.
 */
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned long max_cap,
					      unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	/* Utilization is only recomputed when an update will actually happen. */
	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time, max_cap);

	return true;
}
330ee2cc427SRafael J. Wysocki
/*
 * Scheduler hook for single-CPU policies on frequency-based drivers:
 * compute the next frequency for this CPU and commit it via fast switch
 * or the deferred kthread path.
 */
static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	/* Saved before get_next_freq() possibly overwrites it. */
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap;
	unsigned int next_f;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}
3779bdcb44eSRafael J. Wysocki
/*
 * Scheduler hook for single-CPU policies on drivers that accept raw
 * performance levels (cpufreq_driver_adjust_perf()) instead of frequencies.
 */
static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;
	unsigned long max_cap;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	/* bw_dl serves as the minimum performance level (DL bandwidth floor). */
	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), max_cap);

	sg_cpu->sg_policy->last_freq_update_time = time;
}
415ee2cc427SRafael J. Wysocki
/*
 * Compute the next frequency for a shared policy: refresh the utilization of
 * every CPU in the policy and size the frequency for the busiest one.
 */
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
	unsigned long max_util = 0;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time, max_cap);

		if (j_sg_cpu->util > max_util)
			max_util = j_sg_cpu->util;
	}

	return get_next_freq(sg_policy, max_util, max_cap);
}
4369bdcb44eSRafael J. Wysocki
/*
 * Scheduler hook for shared (multi-CPU) policies.  The update_lock serializes
 * concurrent invocations from different CPUs of the same policy.
 */
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		/* Skip the commit if the frequency would not actually change. */
		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}
4659bdcb44eSRafael J. Wysocki
/*
 * kthread worker body for the slow (non-fast-switch) path: performs the
 * actual frequency change via __cpufreq_driver_target().
 */
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock shortly to handle the case where:
	 * in case sg_policy->next_freq is read here, and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here, we may miss queueing the new update.
	 *
	 * Note: If a work was queued after the update_lock is released,
	 * sugov_work() will just be called again by kthread_work code; and the
	 * request will be proceed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	/* work_lock serializes against governor limit updates. */
	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}
4919bdcb44eSRafael J. Wysocki
sugov_irq_work(struct irq_work * irq_work)4929bdcb44eSRafael J. Wysocki static void sugov_irq_work(struct irq_work *irq_work)
4939bdcb44eSRafael J. Wysocki {
4949bdcb44eSRafael J. Wysocki struct sugov_policy *sg_policy;
4959bdcb44eSRafael J. Wysocki
4969bdcb44eSRafael J. Wysocki sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
49702a7b1eeSViresh Kumar
49802a7b1eeSViresh Kumar kthread_queue_work(&sg_policy->worker, &sg_policy->work);
4999bdcb44eSRafael J. Wysocki }
5009bdcb44eSRafael J. Wysocki
5019bdcb44eSRafael J. Wysocki /************************** sysfs interface ************************/
5029bdcb44eSRafael J. Wysocki
/* Tunables shared across policies when global tunables are in effect. */
static struct sugov_tunables *global_tunables;
/* Serializes creation/teardown of tunables objects. */
static DEFINE_MUTEX(global_tunables_lock);
5059bdcb44eSRafael J. Wysocki
/* Map a generic gov_attr_set back to its enclosing sugov_tunables. */
static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}
5109bdcb44eSRafael J. Wysocki
rate_limit_us_show(struct gov_attr_set * attr_set,char * buf)5119bdcb44eSRafael J. Wysocki static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
5129bdcb44eSRafael J. Wysocki {
5139bdcb44eSRafael J. Wysocki struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
5149bdcb44eSRafael J. Wysocki
5159bdcb44eSRafael J. Wysocki return sprintf(buf, "%u\n", tunables->rate_limit_us);
5169bdcb44eSRafael J. Wysocki }
5179bdcb44eSRafael J. Wysocki
51897fb7a0aSIngo Molnar static ssize_t
rate_limit_us_store(struct gov_attr_set * attr_set,const char * buf,size_t count)51997fb7a0aSIngo Molnar rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
5209bdcb44eSRafael J. Wysocki {
5219bdcb44eSRafael J. Wysocki struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
5229bdcb44eSRafael J. Wysocki struct sugov_policy *sg_policy;
5239bdcb44eSRafael J. Wysocki unsigned int rate_limit_us;
5249bdcb44eSRafael J. Wysocki
5259bdcb44eSRafael J. Wysocki if (kstrtouint(buf, 10, &rate_limit_us))
5269bdcb44eSRafael J. Wysocki return -EINVAL;
5279bdcb44eSRafael J. Wysocki
5289bdcb44eSRafael J. Wysocki tunables->rate_limit_us = rate_limit_us;
5299bdcb44eSRafael J. Wysocki
5309bdcb44eSRafael J. Wysocki list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
5319bdcb44eSRafael J. Wysocki sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
5329bdcb44eSRafael J. Wysocki
5339bdcb44eSRafael J. Wysocki return count;
5349bdcb44eSRafael J. Wysocki }
5359bdcb44eSRafael J. Wysocki
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

/* Attributes exported for each tunables set under sysfs. */
static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);
5439bdcb44eSRafael J. Wysocki
/*
 * kobject release callback: frees the sugov_tunables that embeds the
 * attr_set kobject once its last sysfs reference is dropped.
 */
static void sugov_tunables_free(struct kobject *kobj)
{
	kfree(to_sugov_tunables(to_gov_attr_set(kobj)));
}
550e5c6b312SKevin Hao
/* kobject type backing the schedutil tunables directory in sysfs. */
static const struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};
5569bdcb44eSRafael J. Wysocki
5579bdcb44eSRafael J. Wysocki /********************** cpufreq governor interface *********************/
5589bdcb44eSRafael J. Wysocki
559531b5c9fSQuentin Perret struct cpufreq_governor schedutil_gov;
5609bdcb44eSRafael J. Wysocki
sugov_policy_alloc(struct cpufreq_policy * policy)5619bdcb44eSRafael J. Wysocki static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
5629bdcb44eSRafael J. Wysocki {
5639bdcb44eSRafael J. Wysocki struct sugov_policy *sg_policy;
5649bdcb44eSRafael J. Wysocki
5659bdcb44eSRafael J. Wysocki sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
5669bdcb44eSRafael J. Wysocki if (!sg_policy)
5679bdcb44eSRafael J. Wysocki return NULL;
5689bdcb44eSRafael J. Wysocki
5699bdcb44eSRafael J. Wysocki sg_policy->policy = policy;
5709bdcb44eSRafael J. Wysocki raw_spin_lock_init(&sg_policy->update_lock);
5719bdcb44eSRafael J. Wysocki return sg_policy;
5729bdcb44eSRafael J. Wysocki }
5739bdcb44eSRafael J. Wysocki
/* Counterpart of sugov_policy_alloc(). */
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}
5789bdcb44eSRafael J. Wysocki
/*
 * Create and start the "sugov:<cpu>" worker kthread used on the slow path,
 * i.e. when the driver cannot switch frequencies from scheduler context.
 *
 * Return: 0 on success (or when fast switching makes the kthread
 * unnecessary), a negative error code otherwise.
 */
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		/* Flag identifying the sugov worker to the deadline class. */
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	/* Run the worker as SCHED_DEADLINE with the fake attributes above. */
	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	/* Keep the worker on the CPUs whose frequency it manages. */
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
62902a7b1eeSViresh Kumar
/*
 * Tear down the slow-path worker created by sugov_kthread_create():
 * drain queued work, stop the thread and destroy the work mutex.
 */
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
64002a7b1eeSViresh Kumar
sugov_tunables_alloc(struct sugov_policy * sg_policy)6419bdcb44eSRafael J. Wysocki static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
6429bdcb44eSRafael J. Wysocki {
6439bdcb44eSRafael J. Wysocki struct sugov_tunables *tunables;
6449bdcb44eSRafael J. Wysocki
6459bdcb44eSRafael J. Wysocki tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
6469bdcb44eSRafael J. Wysocki if (tunables) {
6479bdcb44eSRafael J. Wysocki gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
6489bdcb44eSRafael J. Wysocki if (!have_governor_per_policy())
6499bdcb44eSRafael J. Wysocki global_tunables = tunables;
6509bdcb44eSRafael J. Wysocki }
6519bdcb44eSRafael J. Wysocki return tunables;
6529bdcb44eSRafael J. Wysocki }
6539bdcb44eSRafael J. Wysocki
/* Forget the global tunables pointer when their last user goes away. */
static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}
6599bdcb44eSRafael J. Wysocki
/*
 * Governor ->init callback: allocate the per-policy state and slow-path
 * kthread, then either attach to the already-existing global tunables or
 * allocate fresh ones and expose them in sysfs.  Errors unwind through the
 * goto ladder in strict reverse order of acquisition.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		/* Tunables already exist: just attach this policy to them. */
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	/* Default rate limit derives from the driver's transition latency. */
	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	/* kobject_put() releases the tunables via sugov_tunables_free(). */
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}
7359bdcb44eSRafael J. Wysocki
/*
 * Governor ->exit callback: drop this policy's reference on the tunables
 * (the kobject release frees them when the last user detaches) and tear
 * down everything sugov_init() set up.
 */
static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	/* Last user gone: stop publishing the (global) tunables. */
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
7559bdcb44eSRafael J. Wysocki
/*
 * Governor ->start callback: reset the per-policy and per-CPU state, pick
 * the update-util callback variant matching the policy/driver, then attach
 * the scheduler hooks.
 *
 * Return: always 0.
 */
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	/* Some drivers require an update on every limits change. */
	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
	}

	/* Select the callback variant for shared / fast-perf / plain-freq. */
	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	/*
	 * Hooks are attached in a second pass, only after every sg_cpu above
	 * has been initialized — an update can fire as soon as a hook is set.
	 */
	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}
7939bdcb44eSRafael J. Wysocki
/*
 * Governor ->stop callback: detach the scheduler hooks, wait for in-flight
 * callback invocations to drain, then quiesce the slow-path machinery.
 */
static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	/* Ensure no CPU is still executing a just-removed hook. */
	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
8099bdcb44eSRafael J. Wysocki
/*
 * Governor ->limits callback: the policy's frequency limits changed.
 * On the slow path, clamp the current frequency synchronously; in all
 * cases flag the change so the update path re-evaluates the limits.
 */
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	/*
	 * NOTE(review): plain store to a flag read from scheduler context
	 * elsewhere — presumably relies on eventual visibility rather than
	 * ordering; confirm whether WRITE_ONCE()/barriers are warranted.
	 */
	sg_policy->limits_changed = true;
}
8229bdcb44eSRafael J. Wysocki
/* Non-static so the EAS code below can compare policy->governor to it. */
struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};
8339bdcb44eSRafael J. Wysocki
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
/* Make schedutil the default governor when so configured. */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif
84058919e83SRafael J. Wysocki
/* Register schedutil with the cpufreq core at boot via this init macro. */
cpufreq_governor_init(schedutil_gov);
842531b5c9fSQuentin Perret
#ifdef CONFIG_ENERGY_MODEL
/* Deferred sched-domain rebuild; see sched_cpufreq_governor_change(). */
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
849531b5c9fSQuentin Perret
850531b5c9fSQuentin Perret /*
851531b5c9fSQuentin Perret * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
852531b5c9fSQuentin Perret * on governor changes to make sure the scheduler knows about it.
853531b5c9fSQuentin Perret */
sched_cpufreq_governor_change(struct cpufreq_policy * policy,struct cpufreq_governor * old_gov)854531b5c9fSQuentin Perret void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
855531b5c9fSQuentin Perret struct cpufreq_governor *old_gov)
856531b5c9fSQuentin Perret {
857531b5c9fSQuentin Perret if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
858531b5c9fSQuentin Perret /*
859531b5c9fSQuentin Perret * When called from the cpufreq_register_driver() path, the
860531b5c9fSQuentin Perret * cpu_hotplug_lock is already held, so use a work item to
861531b5c9fSQuentin Perret * avoid nested locking in rebuild_sched_domains().
862531b5c9fSQuentin Perret */
863531b5c9fSQuentin Perret schedule_work(&rebuild_sd_work);
864531b5c9fSQuentin Perret }
865531b5c9fSQuentin Perret
866531b5c9fSQuentin Perret }
867531b5c9fSQuentin Perret #endif
868