// SPDX-License-Identifier: GPL-2.0
/*
 * Energy Model of devices
 *
 * Copyright (c) 2018-2021, Arm ltd.
 * Written by: Quentin Perret, Arm ltd.
 * Improvements provided by: Lukasz Luba, Arm ltd.
 */

#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>

/*
 * Mutex serializing the registrations of performance domains and letting
 * the driver-defined callback functions sleep.
 */
static DEFINE_MUTEX(em_pd_mutex);

static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;

static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
{
	struct dentry *d;
	char name[24];

	snprintf(name, sizeof(name), "ps:%lu", ps->frequency);

	/* Create per-ps directory */
	d = debugfs_create_dir(name, pd);
	debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
	debugfs_create_ulong("power", 0444, d, &ps->power);
	debugfs_create_ulong("cost", 0444, d, &ps->cost);
	debugfs_create_ulong("inefficient", 0444, d, &ps->flags);
}

static int em_debug_cpus_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);

static int em_debug_flags_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;

	seq_printf(s, "%#lx\n", pd->flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_flags);

static void em_debug_create_pd(struct device *dev)
{
	struct dentry *d;
	int i;

	/* Create the directory of the performance domain */
	d = debugfs_create_dir(dev_name(dev), rootdir);

	if (_is_cpu_device(dev))
		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
				    &em_debug_cpus_fops);

	debugfs_create_file("flags", 0444, d, dev->em_pd,
			    &em_debug_flags_fops);

	/* Create a sub-directory for each performance state */
	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
		em_debug_create_ps(&dev->em_pd->table[i], d);
}

static void em_debug_remove_pd(struct device *dev)
{
	struct dentry *debug_dir;

	debug_dir = debugfs_lookup(dev_name(dev), rootdir);
	debugfs_remove_recursive(debug_dir);

	/* debugfs_lookup() took a reference on the dentry; drop it */
	dput(debug_dir);
}

static int __init em_debug_init(void)
{
	/* Create /sys/kernel/debug/energy_model directory */
	rootdir = debugfs_create_dir("energy_model", NULL);

	return 0;
}
fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif
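
/*
 * For illustration only: with CONFIG_DEBUG_FS enabled, the helpers above
 * produce a hierarchy along the following lines (the device name and the
 * OPP frequency are hypothetical):
 *
 *	/sys/kernel/debug/energy_model/
 *		cpu0/
 *			cpus		(CPU devices only)
 *			flags
 *			ps:1000000/
 *				frequency
 *				power
 *				cost
 *				inefficient
 */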

static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
				int nr_states, struct em_data_callback *cb,
				unsigned long flags)
{
	unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX;
	struct em_perf_state *table;
	int i, ret;
	u64 fmax;

	table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Build the list of performance states for this performance domain */
	for (i = 0, freq = 0; i < nr_states; i++, freq++) {
		/*
		 * active_power() is a driver callback which ceils 'freq' to
		 * the lowest performance state of 'dev' above 'freq' and
		 * updates 'power' and 'freq' accordingly.
		 */
		ret = cb->active_power(dev, &power, &freq);
		if (ret) {
			dev_err(dev, "EM: invalid perf. state: %d\n",
				ret);
			goto free_ps_table;
		}

		/*
		 * We expect the driver callback to increase the frequency for
		 * higher performance states.
		 */
		if (freq <= prev_freq) {
			dev_err(dev, "EM: non-increasing freq: %lu\n",
				freq);
			goto free_ps_table;
		}

		/*
		 * The power returned by active_power() is expected to be
		 * positive and in range.
		 */
		if (!power || power > EM_MAX_POWER) {
			dev_err(dev, "EM: invalid power: %lu\n",
				power);
			goto free_ps_table;
		}

		table[i].power = power;
		table[i].frequency = prev_freq = freq;
	}

	/* Compute the cost of each performance state. */
	fmax = (u64) table[nr_states - 1].frequency;
	for (i = nr_states - 1; i >= 0; i--) {
		unsigned long power_res, cost;

		if (flags & EM_PERF_DOMAIN_ARTIFICIAL) {
			ret = cb->get_cost(dev, table[i].frequency, &cost);
			if (ret || !cost || cost > EM_MAX_POWER) {
				dev_err(dev, "EM: invalid cost %lu %d\n",
					cost, ret);
				goto free_ps_table;
			}
		} else {
			power_res = table[i].power;
			cost = div64_u64(fmax * power_res, table[i].frequency);
		}

		table[i].cost = cost;

		if (table[i].cost >= prev_cost) {
			table[i].flags = EM_PERF_STATE_INEFFICIENT;
			dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
				table[i].frequency);
		} else {
			prev_cost = table[i].cost;
		}
	}

	pd->table = table;
	pd->nr_perf_states = nr_states;

	return 0;

free_ps_table:
	kfree(table);
	return -EINVAL;
}
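
/*
 * Worked example (hypothetical numbers) for the cost computation above,
 * with frequencies in kHz and power in micro-Watts:
 *
 *	freq		power		cost = fmax * power / freq
 *	 500000		 100000		2000000 *  100000 /  500000 =  400000
 *	1500000		 900000		2000000 *  900000 / 1500000 = 1200000
 *	2000000		1000000		2000000 * 1000000 / 2000000 = 1000000
 *
 * Walking the table from the highest state down, the 1500000 kHz state has
 * a higher cost (energy per unit of work) than the 2000000 kHz state, so it
 * gets tagged EM_PERF_STATE_INEFFICIENT.
 */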

static int em_create_pd(struct device *dev, int nr_states,
			struct em_data_callback *cb, cpumask_t *cpus,
			unsigned long flags)
{
	struct em_perf_domain *pd;
	struct device *cpu_dev;
	int cpu, ret, num_cpus;

	if (_is_cpu_device(dev)) {
		num_cpus = cpumask_weight(cpus);

		/* Prevent the max possible energy calculation from overflowing */
		if (num_cpus > EM_MAX_NUM_CPUS) {
			dev_err(dev, "EM: too many CPUs, overflow possible\n");
			return -EINVAL;
		}

		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		cpumask_copy(em_span_cpus(pd), cpus);
	} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;
	}

	ret = em_create_perf_table(dev, pd, nr_states, cb, flags);
	if (ret) {
		kfree(pd);
		return ret;
	}

	if (_is_cpu_device(dev))
		for_each_cpu(cpu, cpus) {
			cpu_dev = get_cpu_device(cpu);
			cpu_dev->em_pd = pd;
		}

	dev->em_pd = pd;

	return 0;
}

static void em_cpufreq_update_efficiencies(struct device *dev)
{
	struct em_perf_domain *pd = dev->em_pd;
	struct em_perf_state *table;
	struct cpufreq_policy *policy;
	int found = 0;
	int i;

	if (!_is_cpu_device(dev) || !pd)
		return;

	policy = cpufreq_cpu_get(cpumask_first(em_span_cpus(pd)));
	if (!policy) {
		dev_warn(dev, "EM: Access to CPUFreq policy failed\n");
		return;
	}

	table = pd->table;

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
			continue;

		if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
			found++;
	}

	cpufreq_cpu_put(policy);

	if (!found)
		return;

	/*
	 * Inefficiencies have been installed in CPUFreq, so the inefficient
	 * frequencies will be skipped. The EM can do the same.
	 */
	pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}

/**
 * em_pd_get() - Return the performance domain for a device
 * @dev : Device to find the performance domain for
 *
 * Returns the performance domain to which @dev belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_pd_get(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return NULL;

	return dev->em_pd;
}
EXPORT_SYMBOL_GPL(em_pd_get);

/**
 * em_cpu_get() - Return the performance domain for a CPU
 * @cpu : CPU to find the performance domain for
 *
 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_cpu_get(int cpu)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return NULL;

	return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);
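
/*
 * Usage sketch (illustrative only, not part of this file): dump the table of
 * the performance domain containing a given CPU. The function name is
 * hypothetical; em_cpu_get() and the em_perf_domain layout used below are as
 * defined by this interface:
 *
 *	static void example_dump_cpu_em(int cpu)
 *	{
 *		struct em_perf_domain *pd = em_cpu_get(cpu);
 *		int i;
 *
 *		if (!pd)
 *			return;
 *
 *		for (i = 0; i < pd->nr_perf_states; i++)
 *			pr_info("%lu kHz: power=%lu cost=%lu\n",
 *				pd->table[i].frequency, pd->table[i].power,
 *				pd->table[i].cost);
 *	}
 */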

/**
 * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
 * @dev : Device for which the EM is to be registered
 * @nr_states : Number of performance states to register
 * @cb : Callback functions providing the data of the Energy Model
 * @cpus : Pointer to cpumask_t, which in case of a CPU device is
 *		obligatory. It can be taken from e.g. 'policy->cpus'. For other
 *		types of devices it should be set to NULL.
 * @microwatts : Flag indicating whether the power values are in micro-Watts
 *		or in some other scale. It must be set properly.
 *
 * Create Energy Model tables for a performance domain using the callbacks
 * defined in cb.
 *
 * It is important to set @microwatts correctly. Some kernel sub-systems
 * might rely on this flag and check if all devices in the EM are using the
 * same scale.
 *
 * If multiple clients register the same performance domain, all but the first
 * registration will be ignored.
 *
 * Return: 0 on success
 */
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *cpus,
				bool microwatts)
{
	unsigned long cap, prev_cap = 0;
	unsigned long flags = 0;
	int cpu, ret;

	if (!dev || !nr_states || !cb)
		return -EINVAL;

	/*
	 * Use a mutex to serialize the registration of performance domains and
	 * let the driver-defined callback functions sleep.
	 */
	mutex_lock(&em_pd_mutex);

	if (dev->em_pd) {
		ret = -EEXIST;
		goto unlock;
	}

	if (_is_cpu_device(dev)) {
		if (!cpus) {
			dev_err(dev, "EM: invalid CPU mask\n");
			ret = -EINVAL;
			goto unlock;
		}

		for_each_cpu(cpu, cpus) {
			if (em_cpu_get(cpu)) {
				dev_err(dev, "EM: exists for CPU%d\n", cpu);
				ret = -EEXIST;
				goto unlock;
			}
			/*
			 * All CPUs of a domain must have the same
			 * micro-architecture, since they all share the same
			 * table.
			 */
			cap = arch_scale_cpu_capacity(cpu);
			if (prev_cap && prev_cap != cap) {
				dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
					cpumask_pr_args(cpus));

				ret = -EINVAL;
				goto unlock;
			}
			prev_cap = cap;
		}
	}

	if (microwatts)
		flags |= EM_PERF_DOMAIN_MICROWATTS;
	else if (cb->get_cost)
		flags |= EM_PERF_DOMAIN_ARTIFICIAL;

	ret = em_create_pd(dev, nr_states, cb, cpus, flags);
	if (ret)
		goto unlock;

	dev->em_pd->flags |= flags;

	em_cpufreq_update_efficiencies(dev);

	em_debug_create_pd(dev);
	dev_info(dev, "EM: created perf domain\n");

unlock:
	mutex_unlock(&em_pd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);

/**
 * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
 * @dev : Device for which the EM is registered
 *
 * Unregister the EM for the specified @dev (but not a CPU device).
 */
void em_dev_unregister_perf_domain(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
		return;

	if (_is_cpu_device(dev))
		return;

	/*
	 * The mutex separates all register/unregister requests and protects
	 * from potential clean-up/setup issues in the debugfs directories.
	 * The debugfs directory name is the same as the device's name.
	 */
	mutex_lock(&em_pd_mutex);
	em_debug_remove_pd(dev);

	kfree(dev->em_pd->table);
	kfree(dev->em_pd);
	dev->em_pd = NULL;
	mutex_unlock(&em_pd_mutex);
}
EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
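
/*
 * Registration sketch for a non-CPU device (illustrative only: the callback
 * names and OPP data are hypothetical, the EM calls are real). As required
 * by em_create_perf_table(), the callback ceils *freq to the next supported
 * OPP and reports that OPP's power, here in micro-Watts:
 *
 *	static int example_active_power(struct device *dev,
 *					unsigned long *power,
 *					unsigned long *freq)
 *	{
 *		static const unsigned long freqs[] = { 500000, 1000000 };
 *		static const unsigned long powers[] = { 100000, 300000 };
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(freqs); i++) {
 *			if (freqs[i] >= *freq) {
 *				*freq = freqs[i];
 *				*power = powers[i];
 *				return 0;
 *			}
 *		}
 *
 *		return -EINVAL;
 *	}
 *
 *	static struct em_data_callback example_cb = {
 *		.active_power = example_active_power,
 *	};
 *
 * Then, from the driver's probe path (NULL cpumask for a non-CPU device,
 * 'true' because the power values are in micro-Watts):
 *
 *	ret = em_dev_register_perf_domain(dev, 2, &example_cb, NULL, true);
 *
 * and from its remove path:
 *
 *	em_dev_unregister_perf_domain(dev);
 */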