/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}
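/*
 * dbs_check_cpu - evaluate the load on all CPUs of a policy.
 *
 * For each CPU sharing @cpu's policy, compute the busy percentage of the
 * wall time elapsed since the previous sample (optionally counting iowait
 * as busy time and nice time as idle time), remember the highest value
 * seen and pass it to the governor-specific ->gov_check_cpu() callback,
 * which decides whether the frequency should change.
 */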
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	policy = cdbs->cur_policy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}
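/*
 * gov_queue_work - (re)arm the governor's delayed work after @delay.
 * @all_cpus: if true, queue the work on every CPU of @policy, otherwise
 *	      only on the local CPU.
 *
 * Nothing is queued if the governor has already been disabled for this
 * policy; the check is done under cpufreq_governor_lock.
 */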
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_common_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->work);
	}
}

/* Will return if we need to evaluate cpu load again or not */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}
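/*
 * cpufreq_governor_init - allocate and initialize the governor's tunables.
 *
 * If a shared, system-wide dbs_data already exists it is simply
 * reference-counted.  Otherwise a new dbs_data is allocated, the
 * governor-specific ->init() callback fills in the tunables, the sampling
 * rate is clamped against the driver's transition latency and the sysfs
 * attribute group is created under the governor's parent kobject.
 */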
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;
		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_dbs_data;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy()) {
		if (WARN_ON(cpufreq_get_global_kobject())) {
			ret = -EINVAL;
			goto cdata_exit;
		}
		cdata->gdbs_data = dbs_data;
	}

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto put_kobj;

	policy->governor_data = dbs_data;

	return 0;

put_kobj:
	if (!have_governor_per_policy()) {
		cdata->gdbs_data = NULL;
		cpufreq_put_global_kobject();
	}
cdata_exit:
	cdata->exit(dbs_data, !policy->governor->initialized);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

static void cpufreq_governor_exit(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy()) {
			cdata->gdbs_data = NULL;
			cpufreq_put_global_kobject();
		}

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}
}

static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->cpu = j;
		j_cdbs->cur_policy = policy;
		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		mutex_init(&j_cdbs->timer_mutex);
		INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->enable = 1;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	/* Initiate timer time stamp */
	cpu_cdbs->time_stamp = ktime_get();

	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
		       true);
	return 0;
}
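/*
 * cpufreq_governor_stop - stop sampling for @policy: mark the conservative
 * governor's per-CPU state disabled, cancel the queued work on every CPU
 * of the policy, then destroy the policy CPU's timer mutex and drop its
 * cur_policy reference.
 */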
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->enable = 0;
	}

	gov_cancel_work(dbs_data, policy);

	mutex_destroy(&cpu_cdbs->timer_mutex);
	cpu_cdbs->cur_policy = NULL;
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy,
				    struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);

	if (!cpu_cdbs->cur_policy)
		return;

	mutex_lock(&cpu_cdbs->timer_mutex);
	if (policy->max < cpu_cdbs->cur_policy->cur)
		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cpu_cdbs->cur_policy->cur)
		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cpu_cdbs->timer_mutex);
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret = 0;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		cpufreq_governor_limits(policy, dbs_data);
		break;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);