1 /* 2 * Copyright (c) 2014 MundoReader S.L. 3 * Author: Heiko Stuebner <heiko@sntech.de> 4 * 5 * Copyright (c) 2016 Rockchip Electronics Co. Ltd. 6 * Author: Xing Zheng <zhengxing@rock-chips.com> 7 * 8 * based on 9 * 10 * samsung/clk.c 11 * Copyright (c) 2013 Samsung Electronics Co., Ltd. 12 * Copyright (c) 2013 Linaro Ltd. 13 * Author: Thomas Abraham <thomas.ab@samsung.com> 14 * 15 * This program is free software; you can redistribute it and/or modify 16 * it under the terms of the GNU General Public License as published by 17 * the Free Software Foundation; either version 2 of the License, or 18 * (at your option) any later version. 19 * 20 * This program is distributed in the hope that it will be useful, 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * GNU General Public License for more details. 24 */ 25 26 #include <linux/slab.h> 27 #include <linux/clk.h> 28 #include <linux/clk-provider.h> 29 #include <linux/mfd/syscon.h> 30 #include <linux/regmap.h> 31 #include <linux/reboot.h> 32 #include <linux/rational.h> 33 #include "clk.h" 34 35 /** 36 * Register a clock branch. 37 * Most clock branches have a form like 38 * 39 * src1 --|--\ 40 * |M |--[GATE]-[DIV]- 41 * src2 --|--/ 42 * 43 * sometimes without one of those components. 
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	/* A mux is only created when the branch has more than one parent. */
	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		/* Read-only muxes get ops that refuse reparenting. */
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	/* A negative gate_offset marks a branch without a gate. */
	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* A zero div_width marks a branch without a divider. */
	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		/*
		 * A div_offset of 0 means the divider field lives in the
		 * same register as the mux (at muxdiv_offset).
		 */
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_composite;
	}

	return clk;
	/* Error unwinding frees in reverse order of allocation. */
err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}

/*
 * Per-branch state for a fractional divider: the divider itself, its
 * optional gate, an optional downstream mux and the notifier bookkeeping
 * used to temporarily re-route that mux around rate changes.
 */
struct rockchip_clk_frac {
	struct notifier_block clk_nb;		/* rate-change notifier */
	struct clk_fractional_divider div;
	struct clk_gate gate;

	struct clk_mux mux;			/* optional child mux */
	const struct clk_ops *mux_ops;
	int mux_frac_idx;	/* parent index of the frac divider in mux */

	bool rate_change_remuxed;	/* mux was re-routed in PRE_RATE_CHANGE */
	int rate_change_idx;		/* original mux parent to restore */
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

/*
 * Rate-change notifier for the fractional divider: on PRE_RATE_CHANGE,
 * point the child mux at the fractional divider (remembering the previous
 * parent); on POST_RATE_CHANGE, restore the original parent if it was
 * re-routed.  Always reports success via notifier_from_errno(0).
 */
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/**
 * fractional divider must set that denominator is 20 times larger than
 * numerator to generate precise clock frequency.
186 */ 187 static void rockchip_fractional_approximation(struct clk_hw *hw, 188 unsigned long rate, unsigned long *parent_rate, 189 unsigned long *m, unsigned long *n) 190 { 191 struct clk_fractional_divider *fd = to_clk_fd(hw); 192 unsigned long p_rate, p_parent_rate; 193 struct clk_hw *p_parent; 194 unsigned long scale; 195 196 p_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 197 if ((rate * 20 > p_rate) && (p_rate % rate != 0)) { 198 p_parent = clk_hw_get_parent(clk_hw_get_parent(hw)); 199 p_parent_rate = clk_hw_get_rate(p_parent); 200 *parent_rate = p_parent_rate; 201 } 202 203 /* 204 * Get rate closer to *parent_rate to guarantee there is no overflow 205 * for m and n. In the result it will be the nearest rate left shifted 206 * by (scale - fd->nwidth) bits. 207 */ 208 scale = fls_long(*parent_rate / rate - 1); 209 if (scale > fd->nwidth) 210 rate <<= scale - fd->nwidth; 211 212 rational_best_approximation(rate, *parent_rate, 213 GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), 214 m, n); 215 } 216 217 static struct clk *rockchip_clk_register_frac_branch( 218 struct rockchip_clk_provider *ctx, const char *name, 219 const char *const *parent_names, u8 num_parents, 220 void __iomem *base, int muxdiv_offset, u8 div_flags, 221 int gate_offset, u8 gate_shift, u8 gate_flags, 222 unsigned long flags, struct rockchip_clk_branch *child, 223 spinlock_t *lock) 224 { 225 struct rockchip_clk_frac *frac; 226 struct clk *clk; 227 struct clk_gate *gate = NULL; 228 struct clk_fractional_divider *div = NULL; 229 const struct clk_ops *div_ops = NULL, *gate_ops = NULL; 230 231 if (muxdiv_offset < 0) 232 return ERR_PTR(-EINVAL); 233 234 if (child && child->branch_type != branch_mux) { 235 pr_err("%s: fractional child clock for %s can only be a mux\n", 236 __func__, name); 237 return ERR_PTR(-EINVAL); 238 } 239 240 frac = kzalloc(sizeof(*frac), GFP_KERNEL); 241 if (!frac) 242 return ERR_PTR(-ENOMEM); 243 244 if (gate_offset >= 0) { 245 gate = &frac->gate; 246 gate->flags = 
gate_flags; 247 gate->reg = base + gate_offset; 248 gate->bit_idx = gate_shift; 249 gate->lock = lock; 250 gate_ops = &clk_gate_ops; 251 } 252 253 div = &frac->div; 254 div->flags = div_flags; 255 div->reg = base + muxdiv_offset; 256 div->mshift = 16; 257 div->mwidth = 16; 258 div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift; 259 div->nshift = 0; 260 div->nwidth = 16; 261 div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift; 262 div->lock = lock; 263 div->approximation = rockchip_fractional_approximation; 264 div_ops = &clk_fractional_divider_ops; 265 266 clk = clk_register_composite(NULL, name, parent_names, num_parents, 267 NULL, NULL, 268 &div->hw, div_ops, 269 gate ? &gate->hw : NULL, gate_ops, 270 flags | CLK_SET_RATE_UNGATE); 271 if (IS_ERR(clk)) { 272 kfree(frac); 273 return clk; 274 } 275 276 if (child) { 277 struct clk_mux *frac_mux = &frac->mux; 278 struct clk_init_data init; 279 struct clk *mux_clk; 280 int ret; 281 282 frac->mux_frac_idx = match_string(child->parent_names, 283 child->num_parents, name); 284 frac->mux_ops = &clk_mux_ops; 285 frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb; 286 287 frac_mux->reg = base + child->muxdiv_offset; 288 frac_mux->shift = child->mux_shift; 289 frac_mux->mask = BIT(child->mux_width) - 1; 290 frac_mux->flags = child->mux_flags; 291 frac_mux->lock = lock; 292 frac_mux->hw.init = &init; 293 294 init.name = child->name; 295 init.flags = child->flags | CLK_SET_RATE_PARENT; 296 init.ops = frac->mux_ops; 297 init.parent_names = child->parent_names; 298 init.num_parents = child->num_parents; 299 300 mux_clk = clk_register(NULL, &frac_mux->hw); 301 if (IS_ERR(mux_clk)) { 302 kfree(frac); 303 return clk; 304 } 305 306 rockchip_clk_add_lookup(ctx, mux_clk, child->id); 307 308 /* notifier on the fraction divider to catch rate changes */ 309 if (frac->mux_frac_idx >= 0) { 310 pr_debug("%s: found fractional parent in mux at pos %d\n", 311 __func__, frac->mux_frac_idx); 312 ret = clk_notifier_register(clk, 
&frac->clk_nb); 313 if (ret) 314 pr_err("%s: failed to register clock notifier for %s\n", 315 __func__, name); 316 } else { 317 pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n", 318 __func__, name, child->name); 319 } 320 } 321 322 return clk; 323 } 324 325 static struct clk *rockchip_clk_register_factor_branch(const char *name, 326 const char *const *parent_names, u8 num_parents, 327 void __iomem *base, unsigned int mult, unsigned int div, 328 int gate_offset, u8 gate_shift, u8 gate_flags, 329 unsigned long flags, spinlock_t *lock) 330 { 331 struct clk *clk; 332 struct clk_gate *gate = NULL; 333 struct clk_fixed_factor *fix = NULL; 334 335 /* without gate, register a simple factor clock */ 336 if (gate_offset == 0) { 337 return clk_register_fixed_factor(NULL, name, 338 parent_names[0], flags, mult, 339 div); 340 } 341 342 gate = kzalloc(sizeof(*gate), GFP_KERNEL); 343 if (!gate) 344 return ERR_PTR(-ENOMEM); 345 346 gate->flags = gate_flags; 347 gate->reg = base + gate_offset; 348 gate->bit_idx = gate_shift; 349 gate->lock = lock; 350 351 fix = kzalloc(sizeof(*fix), GFP_KERNEL); 352 if (!fix) { 353 kfree(gate); 354 return ERR_PTR(-ENOMEM); 355 } 356 357 fix->mult = mult; 358 fix->div = div; 359 360 clk = clk_register_composite(NULL, name, parent_names, num_parents, 361 NULL, NULL, 362 &fix->hw, &clk_fixed_factor_ops, 363 &gate->hw, &clk_gate_ops, flags); 364 if (IS_ERR(clk)) { 365 kfree(fix); 366 kfree(gate); 367 } 368 369 return clk; 370 } 371 372 struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np, 373 void __iomem *base, unsigned long nr_clks) 374 { 375 struct rockchip_clk_provider *ctx; 376 struct clk **clk_table; 377 int i; 378 379 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL); 380 if (!ctx) 381 return ERR_PTR(-ENOMEM); 382 383 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL); 384 if (!clk_table) 385 goto err_free; 386 387 for (i = 0; i < nr_clks; ++i) 388 clk_table[i] = 
ERR_PTR(-ENOENT); 389 390 ctx->reg_base = base; 391 ctx->clk_data.clks = clk_table; 392 ctx->clk_data.clk_num = nr_clks; 393 ctx->cru_node = np; 394 spin_lock_init(&ctx->lock); 395 396 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node, 397 "rockchip,grf"); 398 399 return ctx; 400 401 err_free: 402 kfree(ctx); 403 return ERR_PTR(-ENOMEM); 404 } 405 406 void __init rockchip_clk_of_add_provider(struct device_node *np, 407 struct rockchip_clk_provider *ctx) 408 { 409 if (of_clk_add_provider(np, of_clk_src_onecell_get, 410 &ctx->clk_data)) 411 pr_err("%s: could not register clk provider\n", __func__); 412 } 413 414 void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx, 415 struct clk *clk, unsigned int id) 416 { 417 if (ctx->clk_data.clks && id) 418 ctx->clk_data.clks[id] = clk; 419 } 420 421 void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx, 422 struct rockchip_pll_clock *list, 423 unsigned int nr_pll, int grf_lock_offset) 424 { 425 struct clk *clk; 426 int idx; 427 428 for (idx = 0; idx < nr_pll; idx++, list++) { 429 clk = rockchip_clk_register_pll(ctx, list->type, list->name, 430 list->parent_names, list->num_parents, 431 list->con_offset, grf_lock_offset, 432 list->lock_shift, list->mode_offset, 433 list->mode_shift, list->rate_table, 434 list->flags, list->pll_flags); 435 if (IS_ERR(clk)) { 436 pr_err("%s: failed to register clock %s\n", __func__, 437 list->name); 438 continue; 439 } 440 441 rockchip_clk_add_lookup(ctx, clk, list->id); 442 } 443 } 444 445 void __init rockchip_clk_register_branches( 446 struct rockchip_clk_provider *ctx, 447 struct rockchip_clk_branch *list, 448 unsigned int nr_clk) 449 { 450 struct clk *clk = NULL; 451 unsigned int idx; 452 unsigned long flags; 453 454 for (idx = 0; idx < nr_clk; idx++, list++) { 455 flags = list->flags; 456 457 /* catch simple muxes */ 458 switch (list->branch_type) { 459 case branch_mux: 460 clk = clk_register_mux(NULL, list->name, 461 list->parent_names, list->num_parents, 
462 flags, ctx->reg_base + list->muxdiv_offset, 463 list->mux_shift, list->mux_width, 464 list->mux_flags, &ctx->lock); 465 break; 466 case branch_muxgrf: 467 clk = rockchip_clk_register_muxgrf(list->name, 468 list->parent_names, list->num_parents, 469 flags, ctx->grf, list->muxdiv_offset, 470 list->mux_shift, list->mux_width, 471 list->mux_flags); 472 break; 473 case branch_divider: 474 if (list->div_table) 475 clk = clk_register_divider_table(NULL, 476 list->name, list->parent_names[0], 477 flags, 478 ctx->reg_base + list->muxdiv_offset, 479 list->div_shift, list->div_width, 480 list->div_flags, list->div_table, 481 &ctx->lock); 482 else 483 clk = clk_register_divider(NULL, list->name, 484 list->parent_names[0], flags, 485 ctx->reg_base + list->muxdiv_offset, 486 list->div_shift, list->div_width, 487 list->div_flags, &ctx->lock); 488 break; 489 case branch_fraction_divider: 490 clk = rockchip_clk_register_frac_branch(ctx, list->name, 491 list->parent_names, list->num_parents, 492 ctx->reg_base, list->muxdiv_offset, 493 list->div_flags, 494 list->gate_offset, list->gate_shift, 495 list->gate_flags, flags, list->child, 496 &ctx->lock); 497 break; 498 case branch_half_divider: 499 clk = rockchip_clk_register_halfdiv(list->name, 500 list->parent_names, list->num_parents, 501 ctx->reg_base, list->muxdiv_offset, 502 list->mux_shift, list->mux_width, 503 list->mux_flags, list->div_shift, 504 list->div_width, list->div_flags, 505 list->gate_offset, list->gate_shift, 506 list->gate_flags, flags, &ctx->lock); 507 break; 508 case branch_gate: 509 flags |= CLK_SET_RATE_PARENT; 510 511 clk = clk_register_gate(NULL, list->name, 512 list->parent_names[0], flags, 513 ctx->reg_base + list->gate_offset, 514 list->gate_shift, list->gate_flags, &ctx->lock); 515 break; 516 case branch_composite: 517 clk = rockchip_clk_register_branch(list->name, 518 list->parent_names, list->num_parents, 519 ctx->reg_base, list->muxdiv_offset, 520 list->mux_shift, 521 list->mux_width, 
list->mux_flags, 522 list->div_offset, list->div_shift, list->div_width, 523 list->div_flags, list->div_table, 524 list->gate_offset, list->gate_shift, 525 list->gate_flags, flags, &ctx->lock); 526 break; 527 case branch_mmc: 528 clk = rockchip_clk_register_mmc( 529 list->name, 530 list->parent_names, list->num_parents, 531 ctx->reg_base + list->muxdiv_offset, 532 list->div_shift 533 ); 534 break; 535 case branch_inverter: 536 clk = rockchip_clk_register_inverter( 537 list->name, list->parent_names, 538 list->num_parents, 539 ctx->reg_base + list->muxdiv_offset, 540 list->div_shift, list->div_flags, &ctx->lock); 541 break; 542 case branch_factor: 543 clk = rockchip_clk_register_factor_branch( 544 list->name, list->parent_names, 545 list->num_parents, ctx->reg_base, 546 list->div_shift, list->div_width, 547 list->gate_offset, list->gate_shift, 548 list->gate_flags, flags, &ctx->lock); 549 break; 550 case branch_ddrclk: 551 clk = rockchip_clk_register_ddrclk( 552 list->name, list->flags, 553 list->parent_names, list->num_parents, 554 list->muxdiv_offset, list->mux_shift, 555 list->mux_width, list->div_shift, 556 list->div_width, list->div_flags, 557 ctx->reg_base, &ctx->lock); 558 break; 559 } 560 561 /* none of the cases above matched */ 562 if (!clk) { 563 pr_err("%s: unknown clock type %d\n", 564 __func__, list->branch_type); 565 continue; 566 } 567 568 if (IS_ERR(clk)) { 569 pr_err("%s: failed to register clock %s: %ld\n", 570 __func__, list->name, PTR_ERR(clk)); 571 continue; 572 } 573 574 rockchip_clk_add_lookup(ctx, clk, list->id); 575 } 576 } 577 578 void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, 579 unsigned int lookup_id, 580 const char *name, const char *const *parent_names, 581 u8 num_parents, 582 const struct rockchip_cpuclk_reg_data *reg_data, 583 const struct rockchip_cpuclk_rate_table *rates, 584 int nrates) 585 { 586 struct clk *clk; 587 588 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents, 589 reg_data, 
rates, nrates, 590 ctx->reg_base, &ctx->lock); 591 if (IS_ERR(clk)) { 592 pr_err("%s: failed to register clock %s: %ld\n", 593 __func__, name, PTR_ERR(clk)); 594 return; 595 } 596 597 rockchip_clk_add_lookup(ctx, clk, lookup_id); 598 } 599 600 void __init rockchip_clk_protect_critical(const char *const clocks[], 601 int nclocks) 602 { 603 int i; 604 605 /* Protect the clocks that needs to stay on */ 606 for (i = 0; i < nclocks; i++) { 607 struct clk *clk = __clk_lookup(clocks[i]); 608 609 if (clk) 610 clk_prepare_enable(clk); 611 } 612 } 613 614 static void __iomem *rst_base; 615 static unsigned int reg_restart; 616 static void (*cb_restart)(void); 617 static int rockchip_restart_notify(struct notifier_block *this, 618 unsigned long mode, void *cmd) 619 { 620 if (cb_restart) 621 cb_restart(); 622 623 writel(0xfdb9, rst_base + reg_restart); 624 return NOTIFY_DONE; 625 } 626 627 static struct notifier_block rockchip_restart_handler = { 628 .notifier_call = rockchip_restart_notify, 629 .priority = 128, 630 }; 631 632 void __init 633 rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, 634 unsigned int reg, 635 void (*cb)(void)) 636 { 637 int ret; 638 639 rst_base = ctx->reg_base; 640 reg_restart = reg; 641 cb_restart = cb; 642 ret = register_restart_handler(&rockchip_restart_handler); 643 if (ret) 644 pr_err("%s: cannot register restart handler, %d\n", 645 __func__, ret); 646 } 647