/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

/*
 * Root clock generators (RCGs): a source mux, an optional M/N counter and an
 * optional pre-divider, programmed through NS/MD registers.  The "dyn" RCG
 * variant has two register banks and a glitch-free mux bit selecting the
 * active one, so the inactive bank can be reprogrammed while clocking.
 */

/* Extract the source-select field from an NS register value. */
static u32 ns_to_src(struct src_sel *s, u32 ns)
{
	ns >>= s->src_sel_shift;
	ns &= SRC_SEL_MASK;
	return ns;
}

/* Insert source-select @src into NS register value @ns. */
static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns)
{
	u32 mask;

	mask = SRC_SEL_MASK;
	mask <<= s->src_sel_shift;
	ns &= ~mask;

	ns |= src << s->src_sel_shift;
	return ns;
}

/*
 * Map the hardware source-select value back to a parent index; falls back
 * to parent 0 if the register reads an unmapped value.
 */
static u8 clk_rcg_get_parent(struct clk_hw *hw)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 ns;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	if (ret)
		goto err;
	ns = ns_to_src(&rcg->s, ns);
	for (i = 0; i < num_parents; i++)
		if (ns == rcg->s.parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, __clk_get_name(hw->clk));
	return 0;
}

/* Return which of the two register banks (0 or 1) the mux bit selects. */
static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
{
	bank &= BIT(rcg->mux_sel_bit);
	return !!bank;
}

/* Dual-banked variant of clk_rcg_get_parent(): consult the active bank. */
static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 ns, reg;
	int bank;
	int i, ret;
	struct src_sel *s;

	ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	if (ret)
		goto err;
	bank = reg_to_bank(rcg, reg);
	s = &rcg->s[bank];

	ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
	if (ret)
		goto err;
	ns = ns_to_src(s, ns);

	for (i = 0; i < num_parents; i++)
		if (ns == s->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, __clk_get_name(hw->clk));
	return 0;
}

/* Program the source-select field of the NS register for @index's parent. */
static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	u32 ns;

	regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	ns = src_to_ns(&rcg->s, rcg->s.parent_map[index].cfg, ns);
	regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);

	return 0;
}

/* Extract the M counter value from an MD register value. */
static u32 md_to_m(struct mn *mn, u32 md)
{
	md >>= mn->m_val_shift;
	md &= BIT(mn->width) - 1;
	return md;
}

/*
 * Extract the pre-divider field from an NS register value.  Note the
 * hardware field holds (divider - 1); callers add 1 to get the divisor.
 */
static u32 ns_to_pre_div(struct pre_div *p, u32 ns)
{
	ns >>= p->pre_div_shift;
	ns &= BIT(p->pre_div_width) - 1;
	return ns;
}

/* Insert pre-divider field @pre_div (already divider - 1) into @ns. */
static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns)
{
	u32 mask;

	mask = BIT(p->pre_div_width) - 1;
	mask <<= p->pre_div_shift;
	ns &= ~mask;

	ns |= pre_div << p->pre_div_shift;
	return ns;
}

/*
 * Encode M/N into an MD register value: M is stored directly and the D
 * field holds the one's complement of N.  When N is 0 the counter is
 * bypassed and both fields are left cleared.
 */
static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
{
	u32 mask, mask_w;

	mask_w = BIT(mn->width) - 1;
	mask = (mask_w << mn->m_val_shift) | mask_w;
	md &= ~mask;

	if (n) {
		m <<= mn->m_val_shift;
		md |= m;
		md |= ~n & mask_w;
	}

	return md;
}

/*
 * Recover N from an NS register value and a known M.  The NS register
 * stores ~(N - M) (see mn_to_ns()), so N = (~field & mask) + M.
 */
static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m)
{
	ns = ~ns >> mn->n_val_shift;
	ns &= BIT(mn->width) - 1;
	return ns + m;
}

/* Extract the M/N counter mode field from a control register value. */
static u32 reg_to_mnctr_mode(struct mn *mn, u32 val)
{
	val >>= mn->mnctr_mode_shift;
	val &= MNCTR_MODE_MASK;
	return val;
}

/*
 * Encode M/N into an NS register value.  The hardware wants ~(N - M) in
 * the N field; when N is 0 the field is left cleared (counter bypassed).
 */
static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns)
{
	u32 mask;

	mask = BIT(mn->width) - 1;
	mask <<= mn->n_val_shift;
	ns &= ~mask;

	if (n) {
		n = n - m;
		n = ~n;
		n &= BIT(mn->width) - 1;
		n <<= mn->n_val_shift;
		ns |= n;
	}

	return ns;
}

/*
 * Encode the M/N counter enable and mode bits into a control register
 * value: enabled in dual-edge mode when N is in use, disabled otherwise.
 */
static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
{
	u32 mask;

	mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift;
	mask |= BIT(mn->mnctr_en_bit);
	val &= ~mask;

	if (n) {
		val |= BIT(mn->mnctr_en_bit);
		val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift;
	}

	return val;
}

/*
 * Program one bank of a dual-banked RCG with the settings in @f and, if
 * the clock is running, switch over to it.  When the clock is enabled we
 * program the *inactive* bank and then flip the mux bit so the change is
 * glitch-free; when it is disabled we program the active bank in place.
 * The M/N counter is held in reset (mnctr_reset_bit) around the MD/NS
 * update.
 */
static int configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
{
	u32 ns, md, reg;
	int bank, new_bank, ret, index;
	struct mn *mn;
	struct pre_div *p;
	struct src_sel *s;
	bool enabled;
	u32 md_reg, ns_reg;
	bool banked_mn = !!rcg->mn[1].width;
	bool banked_p = !!rcg->p[1].pre_div_width;
	struct clk_hw *hw = &rcg->clkr.hw;

	enabled = __clk_is_enabled(hw->clk);

	ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	if (ret)
		return ret;
	bank = reg_to_bank(rcg, reg);
	new_bank = enabled ? !bank : bank;

	ns_reg = rcg->ns_reg[new_bank];
	ret = regmap_read(rcg->clkr.regmap, ns_reg, &ns);
	if (ret)
		return ret;

	if (banked_mn) {
		mn = &rcg->mn[new_bank];
		md_reg = rcg->md_reg[new_bank];

		/* Assert M/N counter reset while reprogramming it */
		ns |= BIT(mn->mnctr_reset_bit);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;

		ret = regmap_read(rcg->clkr.regmap, md_reg, &md);
		if (ret)
			return ret;
		md = mn_to_md(mn, f->m, f->n, md);
		ret = regmap_write(rcg->clkr.regmap, md_reg, md);
		if (ret)
			return ret;
		ns = mn_to_ns(mn, f->m, f->n, ns);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;

		/* Two NS registers means mode control is in NS register */
		if (rcg->ns_reg[0] != rcg->ns_reg[1]) {
			ns = mn_to_reg(mn, f->m, f->n, ns);
			ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
			if (ret)
				return ret;
		} else {
			reg = mn_to_reg(mn, f->m, f->n, reg);
			ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg,
					   reg);
			if (ret)
				return ret;
		}

		/* Release the M/N counter reset */
		ns &= ~BIT(mn->mnctr_reset_bit);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;
	}

	if (banked_p) {
		p = &rcg->p[new_bank];
		/* Hardware pre-div field is (divider - 1) */
		ns = pre_div_to_ns(p, f->pre_div - 1, ns);
	}

	s = &rcg->s[new_bank];
	index = qcom_find_src_index(hw, s->parent_map, f->src);
	if (index < 0)
		return index;
	ns = src_to_ns(s, s->parent_map[index].cfg, ns);
	ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
	if (ret)
		return ret;

	if (enabled) {
		/* Flip the mux bit to switch to the newly programmed bank */
		ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
		if (ret)
			return ret;
		reg ^= BIT(rcg->mux_sel_bit);
		ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Change the parent of a dual-banked RCG.  Rebuild a freq_tbl entry from
 * the currently programmed M/N and pre-div so only the source changes,
 * then reprogram via configure_bank().
 */
static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	u32 ns, md, reg;
	int bank;
	struct freq_tbl f = { 0 };
	bool banked_mn = !!rcg->mn[1].width;
	bool banked_p = !!rcg->p[1].pre_div_width;

	regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	bank = reg_to_bank(rcg, reg);

	regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);

	if (banked_mn) {
		regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
		f.m = md_to_m(&rcg->mn[bank], md);
		f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
	}

	if (banked_p)
		f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;

	f.src = qcom_find_src_index(hw, rcg->s[bank].parent_map, index);
	return configure_bank(rcg, &f);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            pre_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div)
{
	if (pre_div)
		rate /= pre_div + 1;

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}

/* Read back NS/MD (and mode) and compute the output rate of a basic RCG. */
static unsigned long
clk_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	u32 pre_div, m = 0, n = 0, ns, md, mode = 0;
	struct mn *mn = &rcg->mn;

	regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	pre_div = ns_to_pre_div(&rcg->p, ns);

	if (rcg->mn.width) {
		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
		m = md_to_m(mn, md);
		n = ns_m_to_n(mn, ns, m);
		/* MN counter mode is in hw.enable_reg sometimes */
		if (rcg->clkr.enable_reg != rcg->ns_reg)
			regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &mode);
		else
			mode = ns;
		mode = reg_to_mnctr_mode(mn, mode);
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

/* Dual-banked variant of clk_rcg_recalc_rate(): read the active bank. */
static unsigned long
clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	u32 m, n, pre_div, ns, md, mode, reg;
	int bank;
	struct mn *mn;
	bool banked_p = !!rcg->p[1].pre_div_width;
	bool banked_mn = !!rcg->mn[1].width;

	regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	bank = reg_to_bank(rcg, reg);

	regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
	m = n = pre_div = mode = 0;

	if (banked_mn) {
		mn = &rcg->mn[bank];
		regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
		m = md_to_m(mn, md);
		n = ns_m_to_n(mn, ns, m);
		/* Two NS registers means mode control is in NS register */
		if (rcg->ns_reg[0] != rcg->ns_reg[1])
			reg = ns;
		mode = reg_to_mnctr_mode(mn, reg);
	}

	if (banked_p)
		pre_div = ns_to_pre_div(&rcg->p[bank], ns);

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

/*
 * Pick a frequency table entry for req->rate and fill in the rate
 * request.  With CLK_SET_RATE_PARENT the required parent rate is derived
 * by inverting the divider chain (rate * pre_div * n / m); otherwise the
 * parent keeps its current rate.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    const struct parent_map *parent_map)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk *p;
	int index;

	f = qcom_find_freq(f, rate);
	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_get_parent_by_index(hw->clk, index);
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = rate * f->pre_div;
		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = __clk_get_rate(p);
	}
	req->best_parent_hw = __clk_get_hw(p);
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

/* determine_rate for the basic RCG: use its single parent map. */
static int clk_rcg_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req,
					rcg->s.parent_map);
}

/* determine_rate for the dual-banked RCG: use the active bank's map. */
static int clk_dyn_rcg_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	u32 reg;
	int bank;
	struct src_sel *s;

	regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	bank = reg_to_bank(rcg, reg);
	s = &rcg->s[bank];

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map);
}

/*
 * determine_rate for a bypass RCG: the output just follows the (single)
 * frequency table entry's source, so delegate rounding to that parent.
 */
static int clk_rcg_bypass_determine_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	struct clk *p;
	int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src);

	p = clk_get_parent_by_index(hw->clk, index);
	req->best_parent_hw = __clk_get_hw(p);
	req->best_parent_rate = __clk_round_rate(p, req->rate);
	req->rate = req->best_parent_rate;

	return 0;
}

/*
 * Program a basic RCG with the settings in @f: hold the M/N counter in
 * reset, write MD and NS (and the mode/enable bits, which may live in
 * either the NS register or the enable register), then release the reset.
 */
static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
{
	u32 ns, md, ctl;
	struct mn *mn = &rcg->mn;
	u32 mask = 0;
	unsigned int reset_reg;

	/* The mnctr reset bit may live in the clock-control register */
	if (rcg->mn.reset_in_cc)
		reset_reg = rcg->clkr.enable_reg;
	else
		reset_reg = rcg->ns_reg;

	if (rcg->mn.width) {
		mask = BIT(mn->mnctr_reset_bit);
		regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, mask);

		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
		md = mn_to_md(mn, f->m, f->n, md);
		regmap_write(rcg->clkr.regmap, rcg->md_reg, md);

		regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
		/* MN counter mode is in hw.enable_reg sometimes */
		if (rcg->clkr.enable_reg != rcg->ns_reg) {
			regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
			ctl = mn_to_reg(mn, f->m, f->n, ctl);
			regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
		} else {
			ns = mn_to_reg(mn, f->m, f->n, ns);
		}
		ns = mn_to_ns(mn, f->m, f->n, ns);
	} else {
		regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	}

	ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns);
	regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);

	/* Release the M/N counter reset (no-op if mask is still 0) */
	regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, 0);

	return 0;
}

/* set_rate for the basic RCG: look up @rate in the frequency table. */
static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	return __clk_rcg_set_rate(rcg, f);
}

/* set_rate for a bypass RCG: always program the single table entry. */
static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);

	return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
}

/*
 * This type of clock has a glitch-free mux that switches between the output of
 * the M/N counter and an always on clock source (XO). When clk_set_rate() is
 * called we need to make sure that we don't switch to the M/N counter if it
 * isn't clocking because the mux will get stuck and the clock will stop
 * outputting a clock. This can happen if the framework isn't aware that this
 * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
 * this we switch the mux in the enable/disable ops and reprogram the M/N
 * counter in the set_rate op. We also make sure to switch away from the M/N
 * counter in set_rate if software thinks the clock is off.
 */
static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	const struct freq_tbl *f;
	int ret;
	u32 gfm = BIT(10);	/* glitch-free mux select bit */

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/* Switch to XO to avoid glitches */
	regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
	ret = __clk_rcg_set_rate(rcg, f);
	/* Switch back to M/N if it's clocking */
	if (__clk_is_enabled(hw->clk))
		regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);

	return ret;
}

/* Enable: select the M/N counter output via the glitch-free mux. */
static int clk_rcg_lcc_enable(struct clk_hw *hw)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	u32 gfm = BIT(10);

	/* Use M/N */
	return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
}

/* Disable: park the glitch-free mux on the always-on XO source. */
static void clk_rcg_lcc_disable(struct clk_hw *hw)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	u32 gfm = BIT(10);

	/* Use XO */
	regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
}

/* Look up @rate in the table and program it via the bank-switch helper. */
static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	return configure_bank(rcg, f);
}

static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_dyn_rcg_set_rate(hw, rate);
}

/*
 * Rate and parent are programmed together by configure_bank(), since the
 * freq_tbl entry already carries the source.
 */
static int clk_dyn_rcg_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_dyn_rcg_set_rate(hw, rate);
}

const struct clk_ops clk_rcg_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_determine_rate,
	.set_rate = clk_rcg_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_ops);

const struct clk_ops clk_rcg_bypass_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_bypass_determine_rate,
	.set_rate = clk_rcg_bypass_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);

const struct clk_ops clk_rcg_lcc_ops = {
	.enable = clk_rcg_lcc_enable,
	.disable = clk_rcg_lcc_disable,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_determine_rate,
	.set_rate = clk_rcg_lcc_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);

const struct clk_ops clk_dyn_rcg_ops = {
	.enable = clk_enable_regmap,
	.is_enabled = clk_is_enabled_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_dyn_rcg_get_parent,
	.set_parent = clk_dyn_rcg_set_parent,
	.recalc_rate = clk_dyn_rcg_recalc_rate,
	.determine_rate = clk_dyn_rcg_determine_rate,
	.set_rate = clk_dyn_rcg_set_rate,
	.set_rate_and_parent = clk_dyn_rcg_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_dyn_rcg_ops);