// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, The Linux Foundation. All rights reserved.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>

#include "clk-krait.h"

static unsigned int sec_mux_map[] = {
	2,
	0,
};

static unsigned int pri_mux_map[] = {
	1,
	2,
	0,
};

/*
 * Notifier function for switching the muxes to safe parent
 * while the hfpll is getting reprogrammed.
 */
static int krait_notifier_cb(struct notifier_block *nb,
			     unsigned long event,
			     void *data)
{
	int ret = 0;
	struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk,
						 clk_nb);
	/* Switch to safe parent */
	if (event == PRE_RATE_CHANGE) {
		mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
		ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
		mux->reparent = false;
	/*
	 * By the time the POST_RATE_CHANGE notifier is called, the clk
	 * framework itself will already have changed the parent if the new
	 * rate required it. Only otherwise do we switch back to the old
	 * parent.
	 */
	} else if (event == POST_RATE_CHANGE) {
		if (!mux->reparent)
			ret = krait_mux_clk_ops.set_parent(&mux->hw,
							   mux->old_index);
	}

	return notifier_from_errno(ret);
}

static int krait_notifier_register(struct device *dev, struct clk *clk,
				   struct krait_mux_clk *mux)
{
	int ret = 0;

	mux->clk_nb.notifier_call = krait_notifier_cb;
	ret = clk_notifier_register(clk, &mux->clk_nb);
	if (ret)
		dev_err(dev, "failed to register clock notifier: %d\n", ret);

	return ret;
}
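
/*
 * Illustrative call sequence, not additional driver code: a sketch of how
 * the notifier above cooperates with the generic clk rate-change
 * notifications when the HFPLL feeding one of these muxes is reprogrammed.
 *
 *	clk_set_rate(cpu_clk, new_rate)
 *	  PRE_RATE_CHANGE  -> krait_notifier_cb(): remember the current
 *	                      parent and switch the mux to mux->safe_sel
 *	  (HFPLL is reprogrammed for the new rate)
 *	  POST_RATE_CHANGE -> krait_notifier_cb(): switch back to the old
 *	                      parent, unless the framework already
 *	                      reparented the mux (mux->reparent) as part of
 *	                      the rate change
 */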

static int
krait_add_div(struct device *dev, int id, const char *s, unsigned int offset)
{
	struct krait_div2_clk *div;
	struct clk_init_data init = {
		.num_parents = 1,
		.ops = &krait_div2_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	const char *p_names[1];
	struct clk *clk;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	div->width = 2;
	div->shift = 6;
	div->lpl = id >= 0;
	div->offset = offset;
	div->hw.init = &init;

	init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!init.name)
		return -ENOMEM;

	init.parent_names = p_names;
	p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!p_names[0]) {
		kfree(init.name);
		return -ENOMEM;
	}

	clk = devm_clk_register(dev, &div->hw);
	kfree(p_names[0]);
	kfree(init.name);

	return PTR_ERR_OR_ZERO(clk);
}

static int
krait_add_sec_mux(struct device *dev, int id, const char *s,
		  unsigned int offset, bool unique_aux)
{
	int ret;
	struct krait_mux_clk *mux;
	static const char *sec_mux_list[] = {
		"acpu_aux",
		"qsb",
	};
	struct clk_init_data init = {
		.parent_names = sec_mux_list,
		.num_parents = ARRAY_SIZE(sec_mux_list),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk *clk;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->mask = 0x3;
	mux->shift = 2;
	mux->parent_map = sec_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 0;

	init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
	if (!init.name)
		return -ENOMEM;

	if (unique_aux) {
		sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
		if (!sec_mux_list[0]) {
			clk = ERR_PTR(-ENOMEM);
			goto err_aux;
		}
	}

	clk = devm_clk_register(dev, &mux->hw);
	/* Don't register a notifier on a clk that failed to register */
	if (IS_ERR(clk))
		goto unique_aux;

	ret = krait_notifier_register(dev, clk, mux);
	if (ret)
		goto unique_aux;

unique_aux:
	if (unique_aux)
		kfree(sec_mux_list[0]);
err_aux:
	kfree(init.name);
	return PTR_ERR_OR_ZERO(clk);
}

static struct clk *
krait_add_pri_mux(struct device *dev, int id, const char *s,
		  unsigned int offset)
{
	int ret;
	struct krait_mux_clk *mux;
	const char *p_names[3];
	struct clk_init_data init = {
		.parent_names = p_names,
		.num_parents = ARRAY_SIZE(p_names),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk *clk;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->mask = 0x3;
	mux->shift = 0;
	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->parent_map = pri_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 2;

	init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!p_names[0]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p0;
	}

	p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!p_names[1]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p1;
	}

	p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
	if (!p_names[2]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p2;
	}

	clk = devm_clk_register(dev, &mux->hw);
	/* Don't register a notifier on a clk that failed to register */
	if (IS_ERR(clk))
		goto err_p3;

	ret = krait_notifier_register(dev, clk, mux);
	if (ret)
		goto err_p3;
err_p3:
	kfree(p_names[2]);
err_p2:
	kfree(p_names[1]);
err_p1:
	kfree(p_names[0]);
err_p0:
	kfree(init.name);
	return clk;
}
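
/*
 * Summary of the clock topology assembled by the helpers above for each CPU
 * and for the L2 ("%s" is the CPU number, e.g. "0", or "_l2"); this is a
 * description of the registrations in this file, not an extra clock:
 *
 *	hfpll%s_div:     divide-by-two child of hfpll%s (krait_div2_clk_ops)
 *	krait%s_sec_mux: selects between acpu_aux (or acpu%s_aux) and qsb
 *	krait%s_pri_mux: selects between hfpll%s, hfpll%s_div and
 *	                 krait%s_sec_mux; this is the clock handed out to
 *	                 consumers through krait_of_get()
 */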

/* id < 0 for L2, otherwise id == physical CPU number */
static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
{
	int ret;
	unsigned int offset;
	void *p = NULL;
	const char *s;
	struct clk *clk;

	if (id >= 0) {
		offset = 0x4501 + (0x1000 * id);
		s = p = kasprintf(GFP_KERNEL, "%d", id);
		if (!s)
			return ERR_PTR(-ENOMEM);
	} else {
		offset = 0x500;
		s = "_l2";
	}

	ret = krait_add_div(dev, id, s, offset);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err;
	}

	ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err;
	}

	clk = krait_add_pri_mux(dev, id, s, offset);
err:
	kfree(p);
	return clk;
}

static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
{
	unsigned int idx = clkspec->args[0];
	struct clk **clks = data;

	if (idx >= 5) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clks[idx] ? : ERR_PTR(-ENODEV);
}

static const struct of_device_id krait_cc_match_table[] = {
	{ .compatible = "qcom,krait-cc-v1", (void *)1UL },
	{ .compatible = "qcom,krait-cc-v2" },
	{}
};
MODULE_DEVICE_TABLE(of, krait_cc_match_table);
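
/*
 * Illustrative devicetree usage (a sketch; node names and placement are
 * assumptions, not copied from a particular dtsi). The provider registered
 * by this driver exposes five clocks through krait_of_get(): indices 0-3
 * are the per-CPU primary muxes and index 4 is the L2 primary mux, so
 * consumers reference them with a single index cell:
 *
 *	kraitcc: clock-controller {
 *		compatible = "qcom,krait-cc-v1";
 *		#clock-cells = <1>;
 *	};
 *
 *	cpu@0 {
 *		...
 *		clocks = <&kraitcc 0>;
 *	};
 */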

static int krait_cc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *id;
	unsigned long cur_rate, aux_rate;
	int cpu;
	struct clk *clk;
	struct clk **clks;
	struct clk *l2_pri_mux_clk;

	id = of_match_device(krait_cc_match_table, dev);
	if (!id)
		return -ENODEV;

	/* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
	clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!id->data) {
		clk = clk_register_fixed_factor(dev, "acpu_aux",
						"gpll0_vote", 0, 1, 2);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}

	/* Krait configurations have at most 4 CPUs and one L2 */
	clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		clk = krait_add_clks(dev, cpu, id->data);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
		clks[cpu] = clk;
	}

	l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
	if (IS_ERR(l2_pri_mux_clk))
		return PTR_ERR(l2_pri_mux_clk);
	clks[4] = l2_pri_mux_clk;

	/*
	 * We don't want the CPU or L2 clocks to be turned off at late init
	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
	 * that the clocks have already been prepared and enabled by the time
	 * they take over.
	 */
	for_each_online_cpu(cpu) {
		clk_prepare_enable(l2_pri_mux_clk);
		WARN(clk_prepare_enable(clks[cpu]),
		     "Unable to turn on CPU%d clock", cpu);
	}

	/*
	 * Force reinit of HFPLLs and muxes to overwrite any potential
	 * incorrect configuration of HFPLLs and muxes by the bootloader.
	 * While at it, also make sure the cores are running at known rates
	 * and print the current rate.
	 *
	 * The clocks are set to aux clock rate first to make sure the
	 * secondary mux is not sourcing off of QSB. The rate is then set to
	 * two different rates to force a HFPLL reinit under all
	 * circumstances.
	 */
	cur_rate = clk_get_rate(l2_pri_mux_clk);
	aux_rate = 384000000;
	if (cur_rate == 1) {
		pr_info("L2 @ QSB rate. Forcing new rate.\n");
		cur_rate = aux_rate;
	}
	clk_set_rate(l2_pri_mux_clk, aux_rate);
	clk_set_rate(l2_pri_mux_clk, 2);
	clk_set_rate(l2_pri_mux_clk, cur_rate);
	pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
	for_each_possible_cpu(cpu) {
		clk = clks[cpu];
		cur_rate = clk_get_rate(clk);
		if (cur_rate == 1) {
			pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
			cur_rate = aux_rate;
		}

		clk_set_rate(clk, aux_rate);
		clk_set_rate(clk, 2);
		clk_set_rate(clk, cur_rate);
		pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
	}

	of_clk_add_provider(dev->of_node, krait_of_get, clks);

	return 0;
}

static struct platform_driver krait_cc_driver = {
	.probe = krait_cc_probe,
	.driver = {
		.name = "krait-cc",
		.of_match_table = krait_cc_match_table,
	},
};
module_platform_driver(krait_cc_driver);

MODULE_DESCRIPTION("Krait CPU Clock Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:krait-cc");