// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The DTPM CPU is based on the energy model. It hooks the CPU into the
 * DTPM tree, which in turn updates the power numbers by propagating
 * them from the CPU energy model information up to the parents.
 *
 * The association between the power and the performance states allows
 * the power of the CPU to be set at the OPP granularity.
 *
 * CPU hotplug is supported and the power numbers will be updated
 * if a CPU is hot plugged / unplugged.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>

struct dtpm_cpu {
	struct dtpm dtpm;
	struct freq_qos_request qos_req;
	int cpu;
};

static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);

static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
	return container_of(dtpm, struct dtpm_cpu, dtpm);
}

/*
 * Find the highest performance state whose power, summed over the online
 * CPUs of the perf domain, does not exceed the requested limit, cap the
 * frequency accordingly and return the power effectively allocated.
 */
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
	struct cpumask cpus;
	unsigned long freq;
	u64 power;
	int i, nr_cpus;

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
	nr_cpus = cpumask_weight(&cpus);

	for (i = 0; i < pd->nr_perf_states; i++) {

		power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus;

		if (power > power_limit)
			break;
	}

	freq = pd->table[i - 1].frequency;

	freq_qos_update_request(&dtpm_cpu->qos_req, freq);

	power_limit = pd->table[i - 1].power *
		MICROWATT_PER_MILLIWATT * nr_cpus;

	return power_limit;
}

static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
{
	unsigned long max, sum_util = 0;
	int cpu;

	/*
	 * The capacity is the same for all CPUs belonging to
	 * the same perf domain.
	 */
	max = arch_scale_cpu_capacity(cpumask_first(pd_mask));

	for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
		sum_util += sched_cpu_util(cpu);

	return (power * ((sum_util << 10) / max)) >> 10;
}

static u64 get_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_domain *pd;
	struct cpumask *pd_mask;
	unsigned long freq;
	int i;

	pd = em_cpu_get(dtpm_cpu->cpu);

	pd_mask = em_span_cpus(pd);

	freq = cpufreq_quick_get(dtpm_cpu->cpu);

	for (i = 0; i < pd->nr_perf_states; i++) {

		if (pd->table[i].frequency < freq)
			continue;

		return scale_pd_power_uw(pd_mask, pd->table[i].power *
					 MICROWATT_PER_MILLIWATT);
	}

	return 0;
}

static int update_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
	struct cpumask cpus;
	int nr_cpus;

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
	nr_cpus = cpumask_weight(&cpus);

	dtpm->power_min = em->table[0].power;
	dtpm->power_min *= MICROWATT_PER_MILLIWATT;
	dtpm->power_min *= nr_cpus;

	dtpm->power_max = em->table[em->nr_perf_states - 1].power;
	dtpm->power_max *= MICROWATT_PER_MILLIWATT;
	dtpm->power_max *= nr_cpus;

	return 0;
}

static void pd_release(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
	struct cpufreq_policy *policy;

	if (freq_qos_request_active(&dtpm_cpu->qos_req))
		freq_qos_remove_request(&dtpm_cpu->qos_req);

	policy = cpufreq_cpu_get(dtpm_cpu->cpu);
	if (policy) {
		for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
			per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
	}

	kfree(dtpm_cpu);
}

static struct dtpm_ops dtpm_ops = {
	.set_power_uw = set_pd_power_limit,
	.get_power_uw = get_pd_power_uw,
	.update_power_uw = update_pd_power_uw,
	.release = pd_release,
};

static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
	struct dtpm_cpu *dtpm_cpu;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		dtpm_update_power(&dtpm_cpu->dtpm);

	return 0;
}

static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
	struct dtpm_cpu *dtpm_cpu;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		return dtpm_update_power(&dtpm_cpu->dtpm);

	return 0;
}

static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
{
	struct dtpm_cpu *dtpm_cpu;
	struct cpufreq_policy *policy;
	struct em_perf_domain *pd;
	char name[CPUFREQ_NAME_LEN];
	int ret = -ENOMEM;

	dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm_cpu)
		return 0;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	pd = em_cpu_get(cpu);
	if (!pd || em_is_artificial(pd))
		return -EINVAL;

	dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
	if (!dtpm_cpu)
		return -ENOMEM;

	dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
	dtpm_cpu->cpu = cpu;

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;

	snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);

	ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
	if (ret)
		goto out_kfree_dtpm_cpu;

	ret = freq_qos_add_request(&policy->constraints,
				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
				   pd->table[pd->nr_perf_states - 1].frequency);
	if (ret)
		goto out_dtpm_unregister;

	return 0;

out_dtpm_unregister:
	dtpm_unregister(&dtpm_cpu->dtpm);
	dtpm_cpu = NULL;

out_kfree_dtpm_cpu:
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = NULL;
	kfree(dtpm_cpu);

	return ret;
}

static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
{
	int cpu;

	cpu = of_cpu_node_to_id(np);
	if (cpu < 0)
		return 0;

	return __dtpm_cpu_setup(cpu, dtpm);
}

static int dtpm_cpu_init(void)
{
	int ret;

	/*
	 * The callbacks at CPU hotplug time call dtpm_update_power(),
	 * which in turn calls update_pd_power_uw().
	 *
	 * The function update_pd_power_uw() uses the online mask to
	 * figure out the power consumption limits.
	 *
	 * At CPUHP_AP_ONLINE_DYN, the CPU is present in the CPU
	 * online mask when cpuhp_dtpm_cpu_online() is called, but the
	 * CPU is still in the online mask when the teardown callback
	 * runs. So the power cannot be updated when the CPU is
	 * unplugged.
	 *
	 * At CPUHP_AP_DTPM_CPU_DEAD, the situation is the opposite of
	 * the above: the CPU online mask is not yet up to date when
	 * the CPU is plugged in.
	 *
	 * For this reason, the online and offline callbacks are hooked
	 * to different hotplug states, so that each runs when the CPU
	 * online mask is consistent with the power numbers we want to
	 * update.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_DTPM_CPU_DEAD, "dtpm_cpu:offline",
				NULL, cpuhp_dtpm_cpu_offline);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dtpm_cpu:online",
				cpuhp_dtpm_cpu_online, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static void dtpm_cpu_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
	cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
}

struct dtpm_subsys_ops dtpm_cpu_ops = {
	.name = KBUILD_MODNAME,
	.init = dtpm_cpu_init,
	.exit = dtpm_cpu_exit,
	.setup = dtpm_cpu_setup,
};
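
/*
 * Registration note: nothing in this file registers dtpm_cpu_ops itself.
 * A minimal sketch of how the DTPM core is expected to pick it up,
 * assuming the dtpm_subsys[] table in drivers/powercap/dtpm_subsys.h:
 *
 *	struct dtpm_subsys_ops *dtpm_subsys[] = {
 *	#ifdef CONFIG_DTPM_CPU
 *		&dtpm_cpu_ops,
 *	#endif
 *	};
 *
 * The core is expected to walk that table when the DTPM hierarchy is
 * created, calling ->init() once, ->setup() for each device tree node
 * described in the hierarchy, and ->exit() when the hierarchy is
 * destroyed.
 */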