// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include <linux/rational.h>
#include "clk.h"

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 *                    src1 --|--\
 *                           |M |--[GATE]-[DIV]-
 *                    src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk_hw *hw;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux ? &mux->hw : NULL, mux_ops,
				       div ? &div->hw : NULL, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags);
	if (IS_ERR(hw)) {
		kfree(div);
		kfree(gate);
		kfree(mux);
		return ERR_CAST(hw);
	}

	return hw->clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}
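/*
 * State for a fractional divider branch: the divider itself, an optional
 * gate and, when a child mux is described, that mux plus the bookkeeping
 * needed to temporarily re-route it onto the fractional divider while the
 * divider's rate changes (see rockchip_clk_frac_notifier_cb below).
 */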
struct rockchip_clk_frac {
	struct notifier_block clk_nb;
	struct clk_fractional_divider div;
	struct clk_gate gate;

	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	int mux_frac_idx;

	bool rate_change_remuxed;
	int rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/*
 * A fractional divider needs the denominator to be at least 20 times the
 * numerator to generate a precise clock frequency.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}
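/*
 * Worked example (hypothetical rates): a request for 11.2896 MHz while the
 * divider's parent runs at 24 MHz hits the "rate * 20 > p_rate" case above,
 * so *parent_rate is bumped to the grandparent, say a 594 MHz PLL output.
 * Then scale = fls_long(594000000 / 11289600 - 1) = fls_long(51) = 6, which
 * is below nwidth (16), so the requested rate is not scaled up, and
 * rational_best_approximation() can satisfy the request exactly with
 * m = 392, n = 20625 (594 MHz * 392 / 20625 = 11.2896 MHz), both of which
 * fit in the 16-bit numerator/denominator fields.
 */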
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct clk_hw *hw;
	struct rockchip_clk_frac *frac;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       NULL, NULL,
				       &div->hw, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(hw)) {
		kfree(frac);
		return ERR_CAST(hw);
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int ret;

		frac->mux_frac_idx = match_string(child->parent_names,
						  child->num_parents, name);
		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk)) {
			clk_hw_unregister_composite(hw);
			kfree(frac);
			return mux_clk;
		}

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			pr_debug("%s: found fractional parent in mux at pos %d\n",
				 __func__, frac->mux_frac_idx);
			ret = clk_notifier_register(hw->clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return hw->clk;
}
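/*
 * Illustrative use on Rockchip SoCs: a UART or I2S branch where e.g.
 * clk_uart_src feeds clk_uart_frac (this fractional divider) and a child
 * mux clk_uart selects between clk_uart_src, clk_uart_frac and xin24m.
 * mux_frac_idx records where the fractional divider sits among the child
 * mux's parents, so the notifier above can park the mux on the fractional
 * divider while the divider's rate is being changed.
 */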
static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk_hw *hw;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult, div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       NULL, NULL,
				       &fix->hw, &clk_fixed_factor_ops,
				       &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(hw)) {
		kfree(fix);
		kfree(gate);
		return ERR_CAST(hw);
	}

	return hw->clk;
}
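/*
 * Provider setup: an SoC clock driver calls rockchip_clk_init() first to
 * allocate the provider context and clock table, then registers its PLLs
 * and branches into that context, and only afterwards publishes the table
 * via rockchip_clk_of_add_provider(), so device-tree lookups do not see a
 * partially populated table.
 */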
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
						void __iomem *base,
						unsigned long nr_clks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	int i;

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(rockchip_clk_init);

void rockchip_clk_of_add_provider(struct device_node *np,
				  struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}
EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider);

void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
			     struct clk *clk, unsigned int id)
{
	if (ctx->clk_data.clks && id)
		ctx->clk_data.clks[id] = clk;
}
EXPORT_SYMBOL_GPL(rockchip_clk_add_lookup);

void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
			       list->name);
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
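/*
 * Branch descriptions normally come from static per-SoC tables built with
 * the helper macros in clk.h (such as COMPOSITE, MUX, DIV and GATE); each
 * entry carries the register offsets and bit positions consumed by the
 * cases below. Unknown or failing entries are reported and skipped so one
 * bad entry does not take down the whole provider.
 */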
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
				    struct rockchip_clk_branch *list,
				    unsigned int nr_clk)
{
	struct clk *clk;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;
		clk = NULL;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_muxgrf:
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, ctx->grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_half_divider:
			clk = rockchip_clk_register_halfdiv(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, list->div_shift,
				list->div_width, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_offset, list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);

void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
				  unsigned int lookup_id,
				  const char *name, const char *const *parent_names,
				  u8 num_parents,
				  const struct rockchip_cpuclk_reg_data *reg_data,
				  const struct rockchip_cpuclk_rate_table *rates,
				  int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);

void rockchip_clk_protect_critical(const char *const clocks[],
				   int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		clk_prepare_enable(clk);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);

static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
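/*
 * rockchip_register_restart_notifier() below wires up the restart handler
 * above: on restart it first runs the optional SoC callback (e.g. to put
 * clocks back into a reset-safe state) and then writes the magic value
 * 0xfdb9 to the configured CRU register, presumably the chip's global
 * soft-reset trigger.
 */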
void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
					unsigned int reg,
					void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}
EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
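/*
 * Putting it together - a minimal, purely illustrative sketch of how an SoC
 * clock driver typically uses the helpers in this file. The PLL/branch/
 * critical-clock tables, the register offsets and the clock count below are
 * hypothetical; real drivers define them from the SoC TRM.
 *
 *	static void __init rkxxxx_clk_init(struct device_node *np)
 *	{
 *		struct rockchip_clk_provider *ctx;
 *		void __iomem *reg_base = of_iomap(np, 0);
 *
 *		ctx = rockchip_clk_init(np, reg_base, NR_CLKS);
 *		if (IS_ERR(ctx))
 *			return;
 *
 *		rockchip_clk_register_plls(ctx, rkxxxx_pll_clks,
 *					   ARRAY_SIZE(rkxxxx_pll_clks),
 *					   RKXXXX_GRF_SOC_STATUS);
 *		rockchip_clk_register_branches(ctx, rkxxxx_clk_branches,
 *					       ARRAY_SIZE(rkxxxx_clk_branches));
 *		rockchip_clk_protect_critical(rkxxxx_critical_clocks,
 *					      ARRAY_SIZE(rkxxxx_critical_clocks));
 *		rockchip_register_restart_notifier(ctx, RKXXXX_GLB_SRST_FST, NULL);
 *		rockchip_clk_of_add_provider(np, ctx);
 *	}
 *	CLK_OF_DECLARE(rkxxxx_cru, "rockchip,rkxxxx-cru", rkxxxx_clk_init);
 */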