/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * The OPP code in function set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct private_data {
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct thermal_cooling_device *cdev;
	unsigned int voltage_tolerance; /* in percentage */
};

static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		opp_freq = dev_pm_opp_get_freq(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
			opp_freq / 1000, volt);
	}

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up?  scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		if (!IS_ERR(cpu_reg) && volt_old > 0)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down?  scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}
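/*
 * Look up the device, (optional) regulator and clock for a given CPU.
 * A missing regulator is not fatal, but -EPROBE_DEFER from either the
 * regulator or the clock is propagated so that probing can be retried
 * once the missing resource has been registered.
 */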
static int allocate_resources(int cpu, struct device **cdev,
			      struct regulator **creg, struct clk **cclk)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	/* Try "cpu0" for older DTs */
	if (!cpu)
		reg = reg_cpu0;
	else
		reg = reg_cpu;

try_again:
	cpu_reg = regulator_get_optional(cpu_dev, reg);
	if (IS_ERR(cpu_reg)) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
				cpu);
			return -EPROBE_DEFER;
		}

		/* Try with "cpu-supply" */
		if (reg == reg_cpu0) {
			reg = reg_cpu;
			goto try_again;
		}

		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
			cpu, PTR_ERR(cpu_reg));
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		/* put regulator */
		if (!IS_ERR(cpu_reg))
			regulator_put(cpu_reg);

		ret = PTR_ERR(cpu_clk);

		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
		else
			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
				ret);
	} else {
		*cdev = cpu_dev;
		*creg = cpu_reg;
		*cclk = cpu_clk;
	}

	return ret;
}
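/*
 * Per-policy initialization: grab the CPU device, clock and (optional)
 * regulator, build the OPP based frequency table, disable OPPs the
 * regulator cannot supply, and derive the transition latency from the
 * "clock-latency" DT property plus the regulator's voltage ramp time.
 */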
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_dt_platform_data *pd;
	struct cpufreq_frequency_table *freq_table;
	struct device_node *np;
	struct private_data *priv;
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	unsigned long min_uV = ~0, max_uV = 0;
	unsigned int transition_latency;
	int ret;

	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret) {
		pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
		return ret;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
		ret = -ENOENT;
		goto out_put_reg_clk;
	}

	/* OPPs might be populated at runtime, don't check for error here */
	of_init_opp_table(cpu_dev);

	/*
	 * But we need the OPP table to function, so if it is not there, let's
	 * give platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		pr_debug("OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq = 0;

		/*
		 * Disable any OPPs where the connected regulator isn't able to
		 * provide the specified voltage and record minimum and maximum
		 * voltage levels.
		 */
		while (1) {
			struct dev_pm_opp *opp;
			unsigned long opp_uV, tol_uV;

			rcu_read_lock();
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
			if (IS_ERR(opp)) {
				rcu_read_unlock();
				break;
			}
			opp_uV = dev_pm_opp_get_voltage(opp);
			rcu_read_unlock();

			tol_uV = opp_uV * priv->voltage_tolerance / 100;
			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
							   opp_uV + tol_uV)) {
				if (opp_uV < min_uV)
					min_uV = opp_uV;
				if (opp_uV > max_uV)
					max_uV = opp_uV;
			} else {
				dev_pm_opp_disable(cpu_dev, opp_freq);
			}

			opp_freq++;
		}

		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->cpu_reg = cpu_reg;
	policy->driver_data = priv;

	policy->clk = cpu_clk;
	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_free_cpufreq_table;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	pd = cpufreq_get_driver_data();
	if (!pd || !pd->independent_clocks)
		cpumask_setall(policy->cpus);

	of_node_put(np);

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	of_free_opp_table(cpu_dev);
	of_node_put(np);
out_put_reg_clk:
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	return ret;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	of_free_opp_table(priv->cpu_dev);
	clk_put(policy->clk);
	if (!IS_ERR(priv->cpu_reg))
		regulator_put(priv->cpu_reg);
	kfree(priv);

	return 0;
}
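/*
 * ->ready() runs once the policy is fully initialized; it is used here
 * only to register the CPU cooling device when the CPU node carries a
 * "#cooling-cells" property.  Failure to register is not fatal: cpufreq
 * keeps running without a cooling device.
 */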
static void cpufreq_ready(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;
	struct device_node *np = of_node_get(priv->cpu_dev->of_node);

	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		priv->cdev = of_cpufreq_cooling_register(np,
							 policy->related_cpus);
		if (IS_ERR(priv->cdev)) {
			dev_err(priv->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(priv->cdev));

			priv->cdev = NULL;
		}
	}

	of_node_put(np);
}

static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_generic_attr,
};

static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed to register driver: %d\n", ret);

	return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");
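/*
 * Note: this driver only binds once platform code registers a matching
 * "cpufreq-dt" platform device.  A minimal sketch of such a registration
 * (the exact call site is platform specific and not part of this file):
 *
 *	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 */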