// SPDX-License-Identifier: GPL-2.0
/*
 * Energy Model of devices
 *
 * Copyright (c) 2018-2021, Arm ltd.
 * Written by: Quentin Perret, Arm ltd.
 * Improvements provided by: Lukasz Luba, Arm ltd.
 */

#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>

/*
 * Mutex serializing the registrations of performance domains and letting
 * callbacks defined by drivers sleep.
 */
static DEFINE_MUTEX(em_pd_mutex);

static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;

static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
{
	struct dentry *d;
	char name[24];

	snprintf(name, sizeof(name), "ps:%lu", ps->frequency);

	/* Create per-ps directory */
	d = debugfs_create_dir(name, pd);
	debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
	debugfs_create_ulong("power", 0444, d, &ps->power);
	debugfs_create_ulong("cost", 0444, d, &ps->cost);
	debugfs_create_ulong("inefficient", 0444, d, &ps->flags);
}

static int em_debug_cpus_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);

static int em_debug_units_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;
	char *units = (pd->flags & EM_PERF_DOMAIN_MILLIWATTS) ?
		"milliWatts" : "bogoWatts";

	seq_printf(s, "%s\n", units);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_units);

static int em_debug_skip_inefficiencies_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;
	int enabled = (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES) ? 1 : 0;

	seq_printf(s, "%d\n", enabled);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_skip_inefficiencies);

static void em_debug_create_pd(struct device *dev)
{
	struct dentry *d;
	int i;

	/* Create the directory of the performance domain */
	d = debugfs_create_dir(dev_name(dev), rootdir);

	if (_is_cpu_device(dev))
		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
				    &em_debug_cpus_fops);

	debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);
	debugfs_create_file("skip-inefficiencies", 0444, d, dev->em_pd,
			    &em_debug_skip_inefficiencies_fops);

	/* Create a sub-directory for each performance state */
	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
		em_debug_create_ps(&dev->em_pd->table[i], d);
}
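
/*
 * For reference, the debugfs helpers in this file result in the following
 * layout: one directory per performance domain and one "ps:<frequency>"
 * sub-directory per performance state:
 *
 *	/sys/kernel/debug/energy_model/<dev_name>/
 *		cpus			(CPU devices only)
 *		units
 *		skip-inefficiencies
 *		ps:<frequency>/
 *			frequency
 *			power
 *			cost
 *			inefficient
 */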

static void em_debug_remove_pd(struct device *dev)
{
	struct dentry *debug_dir;

	debug_dir = debugfs_lookup(dev_name(dev), rootdir);
	debugfs_remove_recursive(debug_dir);
}

static int __init em_debug_init(void)
{
	/* Create /sys/kernel/debug/energy_model directory */
	rootdir = debugfs_create_dir("energy_model", NULL);

	return 0;
}
fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif

static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
				int nr_states, struct em_data_callback *cb)
{
	unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX;
	struct em_perf_state *table;
	int i, ret;
	u64 fmax;

	table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Build the list of performance states for this performance domain */
	for (i = 0, freq = 0; i < nr_states; i++, freq++) {
		/*
		 * active_power() is a driver callback which ceils 'freq' to
		 * the lowest performance state of 'dev' above 'freq' and
		 * updates 'power' and 'freq' accordingly.
		 */
		ret = cb->active_power(&power, &freq, dev);
		if (ret) {
			dev_err(dev, "EM: invalid perf. state: %d\n", ret);
			goto free_ps_table;
		}

		/*
		 * We expect the driver callback to increase the frequency for
		 * higher performance states.
		 */
		if (freq <= prev_freq) {
			dev_err(dev, "EM: non-increasing freq: %lu\n", freq);
			goto free_ps_table;
		}

		/*
		 * The power returned by active_power() is expected to be
		 * positive and to fit into 16 bits.
		 */
		if (!power || power > EM_MAX_POWER) {
			dev_err(dev, "EM: invalid power: %lu\n", power);
			goto free_ps_table;
		}

		table[i].power = power;
		table[i].frequency = prev_freq = freq;
	}

	/* Compute the cost of each performance state. */
	fmax = (u64) table[nr_states - 1].frequency;
	for (i = nr_states - 1; i >= 0; i--) {
		unsigned long power_res = em_scale_power(table[i].power);

		table[i].cost = div64_u64(fmax * power_res,
					  table[i].frequency);
		if (table[i].cost >= prev_cost) {
			table[i].flags = EM_PERF_STATE_INEFFICIENT;
			dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
				table[i].frequency);
		} else {
			prev_cost = table[i].cost;
		}
	}

	pd->table = table;
	pd->nr_perf_states = nr_states;

	return 0;

free_ps_table:
	kfree(table);
	return -EINVAL;
}
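
/*
 * Note on the cost metric computed above: for each performance state,
 *
 *	cost[i] = fmax * em_scale_power(power[i]) / freq[i]
 *
 * i.e. it is proportional to power/frequency, the energy spent per unit of
 * work at that state. As a purely illustrative example (made-up OPP values,
 * treating em_scale_power() as an identity): with two states
 * {500000 kHz, 200 mW} and {1000000 kHz, 300 mW}, the costs are
 * 1000000 * 200 / 500000 = 400 and 1000000 * 300 / 1000000 = 300. Since
 * 400 >= 300, the 500000 kHz state would be flagged
 * EM_PERF_STATE_INEFFICIENT: it costs more energy per unit of work than the
 * higher-frequency state.
 */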

static int em_create_pd(struct device *dev, int nr_states,
			struct em_data_callback *cb, cpumask_t *cpus)
{
	struct em_perf_domain *pd;
	struct device *cpu_dev;
	int cpu, ret;

	if (_is_cpu_device(dev)) {
		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		cpumask_copy(em_span_cpus(pd), cpus);
	} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;
	}

	ret = em_create_perf_table(dev, pd, nr_states, cb);
	if (ret) {
		kfree(pd);
		return ret;
	}

	if (_is_cpu_device(dev))
		for_each_cpu(cpu, cpus) {
			cpu_dev = get_cpu_device(cpu);
			cpu_dev->em_pd = pd;
		}

	dev->em_pd = pd;

	return 0;
}

static void em_cpufreq_update_efficiencies(struct device *dev)
{
	struct em_perf_domain *pd = dev->em_pd;
	struct em_perf_state *table;
	struct cpufreq_policy *policy;
	int found = 0;
	int i;

	if (!_is_cpu_device(dev) || !pd)
		return;

	policy = cpufreq_cpu_get(cpumask_first(em_span_cpus(pd)));
	if (!policy) {
		dev_warn(dev, "EM: Access to CPUFreq policy failed\n");
		return;
	}

	table = pd->table;

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
			continue;

		if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
			found++;
	}

	/* Drop the reference taken by cpufreq_cpu_get() */
	cpufreq_cpu_put(policy);

	if (!found)
		return;

	/*
	 * Efficiencies have been installed in CPUFreq, inefficient frequencies
	 * will be skipped. The EM can do the same.
	 */
	pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}

/**
 * em_pd_get() - Return the performance domain for a device
 * @dev : Device to find the performance domain for
 *
 * Returns the performance domain to which @dev belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_pd_get(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return NULL;

	return dev->em_pd;
}
EXPORT_SYMBOL_GPL(em_pd_get);

/**
 * em_cpu_get() - Return the performance domain for a CPU
 * @cpu : CPU to find the performance domain for
 *
 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_cpu_get(int cpu)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return NULL;

	return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);

/**
 * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
 * @dev		: Device for which the EM is to be registered
 * @nr_states	: Number of performance states to register
 * @cb		: Callback functions providing the data of the Energy Model
 * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
 *		obligatory. It can be taken from e.g. 'policy->cpus'. For other
 *		types of devices it should be set to NULL.
 * @milliwatts	: Flag indicating that the power values are in milliWatts or
 *		in some other scale. It must be set properly.
 *
 * Create Energy Model tables for a performance domain using the callbacks
 * defined in cb.
 *
 * It is important to set @milliwatts to the correct value. Some kernel
 * sub-systems might rely on this flag and check if all devices in the EM are
 * using the same scale.
 *
 * If multiple clients register the same performance domain, all but the first
 * registration will be ignored.
 *
 * Return: 0 on success
 */
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *cpus,
				bool milliwatts)
{
	unsigned long cap, prev_cap = 0;
	int cpu, ret;

	if (!dev || !nr_states || !cb)
		return -EINVAL;

	/*
	 * Use a mutex to serialize the registration of performance domains and
	 * let the driver-defined callback functions sleep.
	 */
	mutex_lock(&em_pd_mutex);

	if (dev->em_pd) {
		ret = -EEXIST;
		goto unlock;
	}

	if (_is_cpu_device(dev)) {
		if (!cpus) {
			dev_err(dev, "EM: invalid CPU mask\n");
			ret = -EINVAL;
			goto unlock;
		}

		for_each_cpu(cpu, cpus) {
			if (em_cpu_get(cpu)) {
				dev_err(dev, "EM: exists for CPU%d\n", cpu);
				ret = -EEXIST;
				goto unlock;
			}
			/*
			 * All CPUs of a domain must have the same
			 * micro-architecture since they all share the same
			 * table.
			 */
			cap = arch_scale_cpu_capacity(cpu);
			if (prev_cap && prev_cap != cap) {
				dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
					cpumask_pr_args(cpus));

				ret = -EINVAL;
				goto unlock;
			}
			prev_cap = cap;
		}
	}

	ret = em_create_pd(dev, nr_states, cb, cpus);
	if (ret)
		goto unlock;

	if (milliwatts)
		dev->em_pd->flags |= EM_PERF_DOMAIN_MILLIWATTS;

	em_cpufreq_update_efficiencies(dev);

	em_debug_create_pd(dev);
	dev_info(dev, "EM: created perf domain\n");

unlock:
	mutex_unlock(&em_pd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
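
/*
 * Purely illustrative sketch (kept under "#if 0", it is not part of this
 * file's functionality): how a hypothetical CPUFreq driver could register
 * its performance domain with the API above. The callback name, the two
 * OPPs and their power values are made up for the example.
 */
#if 0
static int example_get_power(unsigned long *power, unsigned long *freq,
			     struct device *dev)
{
	/* Ceil *freq to the lowest supported OPP and report its power in mW */
	if (*freq <= 500000) {
		*freq = 500000;
		*power = 200;
	} else {
		*freq = 1000000;
		*power = 300;
	}

	return 0;
}

static void example_register_em(struct cpufreq_policy *policy)
{
	static struct em_data_callback em_cb = EM_DATA_CB(example_get_power);
	struct device *cpu_dev = get_cpu_device(cpumask_first(policy->cpus));

	/* Two performance states, power values expressed in milliWatts */
	em_dev_register_perf_domain(cpu_dev, 2, &em_cb, policy->cpus, true);
}
#endif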

/**
 * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
 * @dev		: Device for which the EM is registered
 *
 * Unregister the EM for the specified @dev (but not a CPU device).
 */
void em_dev_unregister_perf_domain(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
		return;

	if (_is_cpu_device(dev))
		return;

	/*
	 * The mutex separates all register/unregister requests and protects
	 * from potential clean-up/setup issues in the debugfs directories.
	 * The debugfs directory name is the same as the device's name.
	 */
	mutex_lock(&em_pd_mutex);
	em_debug_remove_pd(dev);

	kfree(dev->em_pd->table);
	kfree(dev->em_pd);
	dev->em_pd = NULL;
	mutex_unlock(&em_pd_mutex);
}
EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);