/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}
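/*
 * Illustrative only: a branch_composite entry, as typically declared with
 * the COMPOSITE() helper in clk.h, ends up in rockchip_clk_register_branch()
 * above.  The offsets, shifts and widths below are made-up values and do not
 * belong to any real SoC:
 *
 *	COMPOSITE(0, "aclk_example", mux_example_p, 0,
 *			EXAMPLE_CLKSEL_CON(1), 6, 2, MFLAGS, 0, 5, DFLAGS,
 *			EXAMPLE_CLKGATE_CON(0), 3, GFLAGS),
 *
 * Such an entry would select one of the mux_example_p parents via bits [7:6]
 * of the CLKSEL register, divide by the value in bits [4:0] plus one, and
 * gate the result via bit 3 of the CLKGATE register.
 */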
struct rockchip_clk_frac {
	struct notifier_block			clk_nb;
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;

	bool					rate_change_remuxed;
	int					rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
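/*
 * Note on the fractional dividers registered above: with mshift/mwidth set
 * to 16/16 and nshift/nwidth set to 0/16, the numerator lives in bits
 * [31:16] and the denominator in bits [15:0] of the divider register, so
 *
 *	rate = parent_rate * numerator / denominator
 *
 * A branch_fraction_divider entry together with its child mux is typically
 * declared through the COMPOSITE_FRACMUX() helper in clk.h; the notifier
 * installed above then forces that mux onto the fractional parent for the
 * duration of a rate change and restores the original parent afterwards.
 */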
static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult, div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &fix->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		kfree(fix);
		kfree(gate);
	}

	return clk;
}

struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	int i;

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	ctx->grf = ERR_PTR(-EPROBE_DEFER);
	spin_lock_init(&ctx->lock);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}
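/*
 * A minimal sketch of how an SoC clock driver is expected to use the helpers
 * in this file (the rk3xxx_* names and register constants are hypothetical,
 * used here only to show the order of the calls):
 *
 *	ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 *	rockchip_clk_register_plls(ctx, rk3xxx_pll_clks,
 *				   ARRAY_SIZE(rk3xxx_pll_clks),
 *				   RK3XXX_GRF_SOC_STATUS0);
 *	rockchip_clk_register_branches(ctx, rk3xxx_clk_branches,
 *				       ARRAY_SIZE(rk3xxx_clk_branches));
 *	rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", ...);
 *	rockchip_clk_protect_critical(rk3xxx_critical_clocks,
 *				      ARRAY_SIZE(rk3xxx_critical_clocks));
 *	rockchip_register_restart_notifier(ctx, RK3XXX_GLB_SRST_FST, NULL);
 *	rockchip_clk_of_add_provider(np, ctx);
 */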
void __init rockchip_clk_of_add_provider(struct device_node *np,
			struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}

void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
			     struct clk *clk, unsigned int id)
{
	if (ctx->clk_data.clks && id)
		ctx->clk_data.clks[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
			       list->name);
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}

void __init rockchip_clk_register_branches(
				      struct rockchip_clk_provider *ctx,
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
			unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(ctx, clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}