// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"

struct private_data {
	struct list_head node;

	cpumask_var_t cpus;
	struct device *cpu_dev;
	struct opp_table *opp_table;
	struct opp_table *reg_opp_table;
	bool have_static_opps;
};

static LIST_HEAD(priv_list);

static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,   /* Extra space for boost-attr if required */
	NULL,
};

/* Return the private data whose CPU mask contains @cpu, or NULL if none */
static struct private_data *cpufreq_dt_find_data(int cpu)
{
	struct private_data *priv;

	list_for_each_entry(priv, &priv_list, node) {
		if (cpumask_test_cpu(cpu, priv->cpus))
			return priv;
	}

	return NULL;
}

static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct private_data *priv = policy->driver_data;
	unsigned long freq = policy->freq_table[index].frequency;

	/* The freq_table is in kHz, the OPP core expects Hz */
	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
}

/*
 * An earlier version of the opp-v1 bindings used to name the regulator
 * "cpu0-supply"; we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
	struct device_node *np;
	struct property *pp;
	int cpu = dev->id;
	const char *name = NULL;

	np = of_node_get(dev->of_node);

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
	if (!cpu) {
		pp = of_find_property(np, "cpu0-supply", NULL);
		if (pp) {
			name = "cpu0";
			goto node_put;
		}
	}

	pp = of_find_property(np, "cpu-supply", NULL);
	if (pp) {
		name = "cpu";
		goto node_put;
	}

	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
node_put:
	of_node_put(np);
	return name;
}

static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct private_data *priv;
	struct device *cpu_dev;
	struct clk *cpu_clk;
	unsigned int transition_latency;
	int ret;

	priv = cpufreq_dt_find_data(policy->cpu);
	if (!priv) {
		pr_err("failed to find data for cpu%d\n", policy->cpu);
		return -ENODEV;
	}

	cpu_dev = priv->cpu_dev;
	cpumask_copy(policy->cpus, priv->cpus);

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Initialize OPP tables for all policy->cpus. They will be shared by
	 * all CPUs that are marked as sharing OPPs via the OPP bindings.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating policy->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for policy->cpus.
	 *
	 * OPPs might be populated at runtime, so don't check for errors here.
	 */
	if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
		priv->have_static_opps = true;

	/*
	 * But we need an OPP table to function, so if it is not there, let's
	 * give the platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "OPP table can't be empty\n");
		ret = -ENODEV;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_opp;
	}

	policy->driver_data = priv;
	policy->clk = cpu_clk;
	policy->freq_table = freq_table;

	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_free_cpufreq_table;
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;

	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
	if (priv->have_static_opps)
		dev_pm_opp_of_cpumask_remove_table(policy->cpus);
	clk_put(cpu_clk);

	return ret;
}

static int cpufreq_online(struct cpufreq_policy *policy)
{
	/* We did light-weight tear down earlier, nothing to do here */
	return 0;
}

static int cpufreq_offline(struct cpufreq_policy *policy)
{
	/*
	 * Preserve policy->driver_data and don't free resources on
	 * light-weight tear down.
	 */
	return 0;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	if (priv->have_static_opps)
		dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	clk_put(policy->clk);
	return 0;
}

static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.online = cpufreq_online,
	.offline = cpufreq_offline,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};

static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
	struct private_data *priv;
	struct device *cpu_dev;
	const char *reg_name;
	int ret;

	/* Check if this CPU is already covered by some other policy */
	if (cpufreq_dt_find_data(cpu))
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
		return -ENOMEM;

	priv->cpu_dev = cpu_dev;

	/* Try to get the OPP table early to ensure resources are available */
	priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
	if (IS_ERR(priv->opp_table)) {
		ret = PTR_ERR(priv->opp_table);
		if (ret != -EPROBE_DEFER)
			dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
		goto free_cpumask;
	}

	/*
	 * The OPP layer will be taking care of regulators now, but it needs
	 * to know the name of the regulator first.
	 */
	reg_name = find_supply_name(cpu_dev);
	if (reg_name) {
		priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
								&reg_name, 1);
		if (IS_ERR(priv->reg_opp_table)) {
			ret = PTR_ERR(priv->reg_opp_table);
			if (ret != -EPROBE_DEFER)
				dev_err(cpu_dev, "failed to set regulators: %d\n",
					ret);
			goto put_table;
		}
	}

	/*
	 * Get OPP-sharing information from the "operating-points-v2" bindings
	 * so we can fill priv->cpus here.
	 */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
	if (ret) {
		if (ret != -ENOENT)
			goto put_reg;

		/*
		 * operating-points-v2 is not supported; for backward
		 * compatibility, fall back to all CPUs sharing the OPP table
		 * if the platform hasn't set the sharing CPUs itself.
		 */
		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
			cpumask_setall(priv->cpus);

			/*
			 * The OPP table is initialized only for this CPU;
			 * mark it as shared with the others as well.
			 */
			ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
			if (ret)
				dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
					__func__, ret);
		}
	}

	list_add(&priv->node, &priv_list);
	return 0;

put_reg:
	if (priv->reg_opp_table)
		dev_pm_opp_put_regulators(priv->reg_opp_table);
put_table:
	dev_pm_opp_put_opp_table(priv->opp_table);
free_cpumask:
	free_cpumask_var(priv->cpus);
	return ret;
}

/* Undo dt_cpufreq_early_init() for every CPU on the list */
static void dt_cpufreq_release(void)
{
	struct private_data *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
		if (priv->reg_opp_table)
			dev_pm_opp_put_regulators(priv->reg_opp_table);
		dev_pm_opp_put_opp_table(priv->opp_table);
		free_cpumask_var(priv->cpus);
		list_del(&priv->node);
	}
}

static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
	int ret, cpu;

	/* Request resources early so we can return in case of -EPROBE_DEFER */
	for_each_possible_cpu(cpu) {
		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
		if (ret)
			goto err;
	}

	if (data) {
		if (data->have_governor_per_policy)
			dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

		dt_cpufreq_driver.resume = data->resume;
		if (data->suspend)
			dt_cpufreq_driver.suspend = data->suspend;
		if (data->get_intermediate) {
			dt_cpufreq_driver.target_intermediate = data->target_intermediate;
			dt_cpufreq_driver.get_intermediate = data->get_intermediate;
		}
	}

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed to register driver: %d\n", ret);
		goto err;
	}

	return 0;
err:
	dt_cpufreq_release();
	return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	dt_cpufreq_release();
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name = "cpufreq-dt",
	},
	.probe = dt_cpufreq_probe,
	.remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");
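
/*
 * Usage sketch (hypothetical, not part of this driver): dt_cpufreq_probe()
 * only picks up the optional callbacks and flags when the "cpufreq-dt"
 * platform device is created with platform data attached. Platform code
 * could do something like the following, where foo_cpufreq_pdata is an
 * illustrative name:
 *
 *	static struct cpufreq_dt_platform_data foo_cpufreq_pdata = {
 *		.have_governor_per_policy = true,
 *	};
 *
 *	platform_device_register_data(NULL, "cpufreq-dt", -1,
 *				      &foo_cpufreq_pdata,
 *				      sizeof(foo_cpufreq_pdata));
 */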