/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(C) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

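/* Protects gov->gdbs_data against concurrent updates. */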
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 * @attr_set: Governor attribute set the tunables belong to.
 * @buf: Buffer containing the new sampling rate value.
 * @count: Length of the data in @buf.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling rate was 1 second and the requested new one is 10 ms
 * (say, because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required), the
 * governor may change the sampling rate too late: up to 1 second later.  Thus,
 * when reducing the sampling rate, the new value has to take effect
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * attr_set->policy_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

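	/* Never let the effective rate drop below the allowed minimum. */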
	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
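
/*
 * Governors typically expose this helper as the store side of a
 * "sampling_rate" sysfs attribute.  A minimal sketch, assuming the
 * gov_attr_rw() helper from cpufreq_governor.h and a matching show
 * callback:
 *
 *	gov_attr_rw(sampling_rate);
 *
 * The resulting attribute is then listed in the governor's kobj_type so
 * that writes to the sysfs file end up in store_sampling_rate().
 */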

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or all of the policies using the governor
 * if its tunables are system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purposes of ondemand, waiting for disk IO is an indication
	 * that the CPU is performance-critical rather than actually idle, so
	 * do not add the iowait time to the CPU idle time in that case.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get absolute load. */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between
			 * the calls, so the previous load value can be used
			 * then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: the governor's
			 * utilization update handler would not have run during
			 * CPU-idle periods.  Hence, an unusually large
			 * 'time_elapsed' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
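				/*
				 * Common case: load is the non-idle fraction
				 * of the elapsed time.  For example,
				 * time_elapsed = 10000 us and idle_time =
				 * 2500 us yield load = 100 * 7500 / 10000,
				 * i.e. 75.
				 */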
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
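
/*
 * Governors are expected to call dbs_update() from their ->gov_dbs_timer()
 * callback to obtain the maximum load across the policy's CPUs.  A
 * hypothetical sketch (the example_* names are illustrative, not part of
 * this file):
 *
 *	static unsigned int example_dbs_timer(struct cpufreq_policy *policy)
 *	{
 *		struct policy_dbs_info *policy_dbs = policy->governor_data;
 *		unsigned int load = dbs_update(policy);
 *
 *		... pick and set a target frequency based on load ...
 *		return policy_dbs->dbs_data->sampling_rate;
 *	}
 *
 * The value returned by ->gov_dbs_timer() becomes the next sample delay,
 * as can be seen in dbs_work_handler() below.
 */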

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.  The barrier pairs with the smp_rmb() in
	 * dbs_update_util_handler().
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.  The
	 * barrier pairs with the smp_wmb() in dbs_work_handler().
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
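		/*
		 * atomic_add_unless() increments work_count only if it is not
		 * already 1, so only one of the CPUs sharing the policy can
		 * get past this point at a time.
		 */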
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
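	/* A zero last_sample_time makes the first update take a sample. */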
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

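	/* Wait for any in-flight invocations of the removed hooks to finish. */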
	synchronize_sched();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline. */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* Policy latency is in ns; convert it to us first. */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together. */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/* Make the first invocation of dbs_update() compute the load. */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	if (event == CPUFREQ_GOV_POLICY_INIT) {
		return cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			return cpufreq_governor_exit(policy);
		case CPUFREQ_GOV_START:
			return cpufreq_governor_start(policy);
		case CPUFREQ_GOV_STOP:
			return cpufreq_governor_stop(policy);
		case CPUFREQ_GOV_LIMITS:
			return cpufreq_governor_limits(policy);
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
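
/*
 * A governor built on top of this code plugs cpufreq_governor_dbs() in as
 * its ->governor() callback.  A minimal sketch using the hooks referenced
 * above (the example_* names are illustrative only):
 *
 *	static struct dbs_governor example_dbs_gov = {
 *		.gov = {
 *			.name = "example",
 *			.governor = cpufreq_governor_dbs,
 *			.owner = THIS_MODULE,
 *		},
 *		.gov_dbs_timer = example_dbs_timer,
 *		.alloc = example_alloc,
 *		.free = example_free,
 *		.init = example_init,
 *		.exit = example_exit,
 *		.start = example_start,
 *	};
 *
 * cpufreq_register_governor(&example_dbs_gov.gov) then makes the governor
 * available for policies to use.
 */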