1 /* 2 * Copyright (c) 2013, The Linux Foundation. All rights reserved. 3 * 4 * This software is licensed under the terms of the GNU General Public 5 * License version 2, as published by the Free Software Foundation, and 6 * may be copied, distributed, and modified under those terms. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/bitops.h> 16 #include <linux/err.h> 17 #include <linux/export.h> 18 #include <linux/clk-provider.h> 19 #include <linux/regmap.h> 20 21 #include <asm/div64.h> 22 23 #include "clk-rcg.h" 24 #include "common.h" 25 26 static u32 ns_to_src(struct src_sel *s, u32 ns) 27 { 28 ns >>= s->src_sel_shift; 29 ns &= SRC_SEL_MASK; 30 return ns; 31 } 32 33 static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns) 34 { 35 u32 mask; 36 37 mask = SRC_SEL_MASK; 38 mask <<= s->src_sel_shift; 39 ns &= ~mask; 40 41 ns |= src << s->src_sel_shift; 42 return ns; 43 } 44 45 static u8 clk_rcg_get_parent(struct clk_hw *hw) 46 { 47 struct clk_rcg *rcg = to_clk_rcg(hw); 48 int num_parents = clk_hw_get_num_parents(hw); 49 u32 ns; 50 int i, ret; 51 52 ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns); 53 if (ret) 54 goto err; 55 ns = ns_to_src(&rcg->s, ns); 56 for (i = 0; i < num_parents; i++) 57 if (ns == rcg->s.parent_map[i].cfg) 58 return i; 59 60 err: 61 pr_debug("%s: Clock %s has invalid parent, using default.\n", 62 __func__, clk_hw_get_name(hw)); 63 return 0; 64 } 65 66 static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank) 67 { 68 bank &= BIT(rcg->mux_sel_bit); 69 return !!bank; 70 } 71 72 static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw) 73 { 74 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 75 int num_parents = clk_hw_get_num_parents(hw); 76 u32 ns, reg; 77 int bank; 78 int i, ret; 79 struct src_sel *s; 80 81 ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 82 if (ret) 83 goto err; 84 bank = reg_to_bank(rcg, reg); 85 s = &rcg->s[bank]; 86 87 ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns); 88 if (ret) 89 goto err; 90 ns = ns_to_src(s, ns); 91 92 for (i = 0; i < num_parents; i++) 93 if (ns == s->parent_map[i].cfg) 94 return i; 95 96 err: 97 pr_debug("%s: Clock %s has invalid parent, using default.\n", 98 __func__, clk_hw_get_name(hw)); 99 return 0; 100 } 101 102 static int clk_rcg_set_parent(struct clk_hw *hw, u8 index) 103 { 104 struct clk_rcg *rcg = to_clk_rcg(hw); 105 u32 ns; 106 107 regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns); 108 ns = src_to_ns(&rcg->s, rcg->s.parent_map[index].cfg, ns); 109 regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns); 110 111 return 0; 112 } 113 114 static u32 md_to_m(struct mn *mn, u32 md) 115 { 116 md >>= mn->m_val_shift; 117 md &= BIT(mn->width) - 1; 118 return md; 119 } 120 121 static u32 ns_to_pre_div(struct pre_div *p, u32 ns) 122 { 123 ns >>= p->pre_div_shift; 124 ns &= BIT(p->pre_div_width) - 1; 125 return ns; 126 } 127 128 static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns) 129 { 130 u32 mask; 131 132 mask = BIT(p->pre_div_width) - 1; 133 mask <<= p->pre_div_shift; 134 ns &= ~mask; 135 136 ns |= pre_div << p->pre_div_shift; 137 return ns; 138 } 139 140 static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md) 141 { 142 u32 mask, mask_w; 143 144 mask_w = BIT(mn->width) - 1; 145 mask = (mask_w << mn->m_val_shift) | mask_w; 146 md &= ~mask; 147 148 if (n) { 149 m <<= mn->m_val_shift; 150 md |= m; 151 md |= ~n & mask_w; 152 } 153 154 return md; 155 } 156 157 static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m) 158 { 159 ns = ~ns >> mn->n_val_shift; 160 ns &= BIT(mn->width) - 1; 161 return ns + m; 162 } 163 164 static u32 reg_to_mnctr_mode(struct mn *mn, u32 val) 165 { 166 val >>= mn->mnctr_mode_shift; 167 val &= MNCTR_MODE_MASK; 168 return val; 169 } 170 171 static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns) 172 { 173 u32 mask; 174 175 mask = BIT(mn->width) - 1; 176 mask <<= mn->n_val_shift; 177 ns &= ~mask; 178 179 if (n) { 180 n = n - m; 181 n = ~n; 182 n &= BIT(mn->width) - 1; 183 n <<= mn->n_val_shift; 184 ns |= n; 185 } 186 187 return ns; 188 } 189 190 static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val) 191 { 192 u32 mask; 193 194 mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift; 195 mask |= BIT(mn->mnctr_en_bit); 196 val &= ~mask; 197 198 if (n) { 199 val |= BIT(mn->mnctr_en_bit); 200 val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift; 201 } 202 203 return val; 204 } 205 206 static int configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f) 207 { 208 u32 ns, md, reg; 209 int bank, new_bank, ret, index; 210 struct mn *mn; 211 struct pre_div *p; 212 struct src_sel *s; 213 bool enabled; 214 u32 md_reg, ns_reg; 215 bool banked_mn = !!rcg->mn[1].width; 216 bool banked_p = !!rcg->p[1].pre_div_width; 217 struct clk_hw *hw = &rcg->clkr.hw; 218 219 enabled = __clk_is_enabled(hw->clk); 220 221 ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 222 if (ret) 223 return ret; 224 bank = reg_to_bank(rcg, reg); 225 new_bank = enabled ? !bank : bank; 226 227 ns_reg = rcg->ns_reg[new_bank]; 228 ret = regmap_read(rcg->clkr.regmap, ns_reg, &ns); 229 if (ret) 230 return ret; 231 232 if (banked_mn) { 233 mn = &rcg->mn[new_bank]; 234 md_reg = rcg->md_reg[new_bank]; 235 236 ns |= BIT(mn->mnctr_reset_bit); 237 ret = regmap_write(rcg->clkr.regmap, ns_reg, ns); 238 if (ret) 239 return ret; 240 241 ret = regmap_read(rcg->clkr.regmap, md_reg, &md); 242 if (ret) 243 return ret; 244 md = mn_to_md(mn, f->m, f->n, md); 245 ret = regmap_write(rcg->clkr.regmap, md_reg, md); 246 if (ret) 247 return ret; 248 ns = mn_to_ns(mn, f->m, f->n, ns); 249 ret = regmap_write(rcg->clkr.regmap, ns_reg, ns); 250 if (ret) 251 return ret; 252 253 /* Two NS registers means mode control is in NS register */ 254 if (rcg->ns_reg[0] != rcg->ns_reg[1]) { 255 ns = mn_to_reg(mn, f->m, f->n, ns); 256 ret = regmap_write(rcg->clkr.regmap, ns_reg, ns); 257 if (ret) 258 return ret; 259 } else { 260 reg = mn_to_reg(mn, f->m, f->n, reg); 261 ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg, 262 reg); 263 if (ret) 264 return ret; 265 } 266 267 ns &= ~BIT(mn->mnctr_reset_bit); 268 ret = regmap_write(rcg->clkr.regmap, ns_reg, ns); 269 if (ret) 270 return ret; 271 } 272 273 if (banked_p) { 274 p = &rcg->p[new_bank]; 275 ns = pre_div_to_ns(p, f->pre_div - 1, ns); 276 } 277 278 s = &rcg->s[new_bank]; 279 index = qcom_find_src_index(hw, s->parent_map, f->src); 280 if (index < 0) 281 return index; 282 ns = src_to_ns(s, s->parent_map[index].cfg, ns); 283 ret = regmap_write(rcg->clkr.regmap, ns_reg, ns); 284 if (ret) 285 return ret; 286 287 if (enabled) { 288 ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 289 if (ret) 290 return ret; 291 reg ^= BIT(rcg->mux_sel_bit); 292 ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg); 293 if (ret) 294 return ret; 295 } 296 return 0; 297 } 298 299 static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index) 300 { 301 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 302 u32 ns, md, reg; 303 int bank; 304 struct freq_tbl f = { 0 }; 305 bool banked_mn = !!rcg->mn[1].width; 306 bool banked_p = !!rcg->p[1].pre_div_width; 307 308 regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 309 bank = reg_to_bank(rcg, reg); 310 311 regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns); 312 313 if (banked_mn) { 314 regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md); 315 f.m = md_to_m(&rcg->mn[bank], md); 316 f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m); 317 } 318 319 if (banked_p) 320 f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1; 321 322 f.src = qcom_find_src_index(hw, rcg->s[bank].parent_map, index); 323 return configure_bank(rcg, &f); 324 } 325 326 /* 327 * Calculate m/n:d rate 328 * 329 * parent_rate m 330 * rate = ----------- x --- 331 * pre_div n 332 */ 333 static unsigned long 334 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div) 335 { 336 if (pre_div) 337 rate /= pre_div + 1; 338 339 if (mode) { 340 u64 tmp = rate; 341 tmp *= m; 342 do_div(tmp, n); 343 rate = tmp; 344 } 345 346 return rate; 347 } 348 349 static unsigned long 350 clk_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) 351 { 352 struct clk_rcg *rcg = to_clk_rcg(hw); 353 u32 pre_div, m = 0, n = 0, ns, md, mode = 0; 354 struct mn *mn = &rcg->mn; 355 356 regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns); 357 pre_div = ns_to_pre_div(&rcg->p, ns); 358 359 if (rcg->mn.width) { 360 regmap_read(rcg->clkr.regmap, rcg->md_reg, &md); 361 m = md_to_m(mn, md); 362 n = ns_m_to_n(mn, ns, m); 363 /* MN counter mode is in hw.enable_reg sometimes */ 364 if (rcg->clkr.enable_reg != rcg->ns_reg) 365 regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &mode); 366 else 367 mode = ns; 368 mode = reg_to_mnctr_mode(mn, mode); 369 } 370 371 return calc_rate(parent_rate, m, n, mode, pre_div); 372 } 373 374 static unsigned long 375 clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) 376 { 377 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 378 u32 m, n, pre_div, ns, md, mode, reg; 379 int bank; 380 struct mn *mn; 381 bool banked_p = !!rcg->p[1].pre_div_width; 382 bool banked_mn = !!rcg->mn[1].width; 383 384 regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 385 bank = reg_to_bank(rcg, reg); 386 387 regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns); 388 m = n = pre_div = mode = 0; 389 390 if (banked_mn) { 391 mn = &rcg->mn[bank]; 392 regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md); 393 m = md_to_m(mn, md); 394 n = ns_m_to_n(mn, ns, m); 395 /* Two NS registers means mode control is in NS register */ 396 if (rcg->ns_reg[0] != rcg->ns_reg[1]) 397 reg = ns; 398 mode = reg_to_mnctr_mode(mn, reg); 399 } 400 401 if (banked_p) 402 pre_div = ns_to_pre_div(&rcg->p[bank], ns); 403 404 return calc_rate(parent_rate, m, n, mode, pre_div); 405 } 406 407 static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, 408 struct clk_rate_request *req, 409 const struct parent_map *parent_map) 410 { 411 unsigned long clk_flags, rate = req->rate; 412 struct clk_hw *p; 413 int index; 414 415 f = qcom_find_freq(f, rate); 416 if (!f) 417 return -EINVAL; 418 419 index = qcom_find_src_index(hw, parent_map, f->src); 420 if (index < 0) 421 return index; 422 423 clk_flags = clk_hw_get_flags(hw); 424 p = clk_hw_get_parent_by_index(hw, index); 425 if (clk_flags & CLK_SET_RATE_PARENT) { 426 rate = rate * f->pre_div; 427 if (f->n) { 428 u64 tmp = rate; 429 tmp = tmp * f->n; 430 do_div(tmp, f->m); 431 rate = tmp; 432 } 433 } else { 434 rate = clk_hw_get_rate(p); 435 } 436 req->best_parent_hw = p; 437 req->best_parent_rate = rate; 438 req->rate = f->freq; 439 440 return 0; 441 } 442 443 static int clk_rcg_determine_rate(struct clk_hw *hw, 444 struct clk_rate_request *req) 445 { 446 struct clk_rcg *rcg = to_clk_rcg(hw); 447 448 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, 449 rcg->s.parent_map); 450 } 451 452 static int clk_dyn_rcg_determine_rate(struct clk_hw *hw, 453 struct clk_rate_request *req) 454 { 455 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 456 u32 reg; 457 int bank; 458 struct src_sel *s; 459 460 regmap_read(rcg->clkr.regmap, rcg->bank_reg, ®); 461 bank = reg_to_bank(rcg, reg); 462 s = &rcg->s[bank]; 463 464 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map); 465 } 466 467 static int clk_rcg_bypass_determine_rate(struct clk_hw *hw, 468 struct clk_rate_request *req) 469 { 470 struct clk_rcg *rcg = to_clk_rcg(hw); 471 const struct freq_tbl *f = rcg->freq_tbl; 472 struct clk_hw *p; 473 int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src); 474 475 req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index); 476 req->best_parent_rate = clk_hw_round_rate(p, req->rate); 477 req->rate = req->best_parent_rate; 478 479 return 0; 480 } 481 482 static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f) 483 { 484 u32 ns, md, ctl; 485 struct mn *mn = &rcg->mn; 486 u32 mask = 0; 487 unsigned int reset_reg; 488 489 if (rcg->mn.reset_in_cc) 490 reset_reg = rcg->clkr.enable_reg; 491 else 492 reset_reg = rcg->ns_reg; 493 494 if (rcg->mn.width) { 495 mask = BIT(mn->mnctr_reset_bit); 496 regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, mask); 497 498 regmap_read(rcg->clkr.regmap, rcg->md_reg, &md); 499 md = mn_to_md(mn, f->m, f->n, md); 500 regmap_write(rcg->clkr.regmap, rcg->md_reg, md); 501 502 regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns); 503 /* MN counter mode is in hw.enable_reg sometimes */ 504 if (rcg->clkr.enable_reg != rcg->ns_reg) { 505 regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl); 506 ctl = mn_to_reg(mn, f->m, f->n, ctl); 507 regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl); 508 } else { 509 ns = mn_to_reg(mn, f->m, f->n, ns); 510 } 511 ns = mn_to_ns(mn, f->m, f->n, ns); 512 } else { 513 regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns); 514 } 515 516 ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns); 517 regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns); 518 519 regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, 0); 520 521 return 0; 522 } 523 524 static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate, 525 unsigned long parent_rate) 526 { 527 struct clk_rcg *rcg = to_clk_rcg(hw); 528 const struct freq_tbl *f; 529 530 f = qcom_find_freq(rcg->freq_tbl, rate); 531 if (!f) 532 return -EINVAL; 533 534 return __clk_rcg_set_rate(rcg, f); 535 } 536 537 static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate, 538 unsigned long parent_rate) 539 { 540 struct clk_rcg *rcg = to_clk_rcg(hw); 541 542 return __clk_rcg_set_rate(rcg, rcg->freq_tbl); 543 } 544 545 /* 546 * This type of clock has a glitch-free mux that switches between the output of 547 * the M/N counter and an always on clock source (XO). When clk_set_rate() is 548 * called we need to make sure that we don't switch to the M/N counter if it 549 * isn't clocking because the mux will get stuck and the clock will stop 550 * outputting a clock. This can happen if the framework isn't aware that this 551 * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix 552 * this we switch the mux in the enable/disable ops and reprogram the M/N 553 * counter in the set_rate op. We also make sure to switch away from the M/N 554 * counter in set_rate if software thinks the clock is off. 555 */ 556 static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate, 557 unsigned long parent_rate) 558 { 559 struct clk_rcg *rcg = to_clk_rcg(hw); 560 const struct freq_tbl *f; 561 int ret; 562 u32 gfm = BIT(10); 563 564 f = qcom_find_freq(rcg->freq_tbl, rate); 565 if (!f) 566 return -EINVAL; 567 568 /* Switch to XO to avoid glitches */ 569 regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0); 570 ret = __clk_rcg_set_rate(rcg, f); 571 /* Switch back to M/N if it's clocking */ 572 if (__clk_is_enabled(hw->clk)) 573 regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm); 574 575 return ret; 576 } 577 578 static int clk_rcg_lcc_enable(struct clk_hw *hw) 579 { 580 struct clk_rcg *rcg = to_clk_rcg(hw); 581 u32 gfm = BIT(10); 582 583 /* Use M/N */ 584 return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm); 585 } 586 587 static void clk_rcg_lcc_disable(struct clk_hw *hw) 588 { 589 struct clk_rcg *rcg = to_clk_rcg(hw); 590 u32 gfm = BIT(10); 591 592 /* Use XO */ 593 regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0); 594 } 595 596 static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate) 597 { 598 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 599 const struct freq_tbl *f; 600 601 f = qcom_find_freq(rcg->freq_tbl, rate); 602 if (!f) 603 return -EINVAL; 604 605 return configure_bank(rcg, f); 606 } 607 608 static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate, 609 unsigned long parent_rate) 610 { 611 return __clk_dyn_rcg_set_rate(hw, rate); 612 } 613 614 static int clk_dyn_rcg_set_rate_and_parent(struct clk_hw *hw, 615 unsigned long rate, unsigned long parent_rate, u8 index) 616 { 617 return __clk_dyn_rcg_set_rate(hw, rate); 618 } 619 620 const struct clk_ops clk_rcg_ops = { 621 .enable = clk_enable_regmap, 622 .disable = clk_disable_regmap, 623 .get_parent = clk_rcg_get_parent, 624 .set_parent = clk_rcg_set_parent, 625 .recalc_rate = clk_rcg_recalc_rate, 626 .determine_rate = clk_rcg_determine_rate, 627 .set_rate = clk_rcg_set_rate, 628 }; 629 EXPORT_SYMBOL_GPL(clk_rcg_ops); 630 631 const struct clk_ops clk_rcg_bypass_ops = { 632 .enable = clk_enable_regmap, 633 .disable = clk_disable_regmap, 634 .get_parent = clk_rcg_get_parent, 635 .set_parent = clk_rcg_set_parent, 636 .recalc_rate = clk_rcg_recalc_rate, 637 .determine_rate = clk_rcg_bypass_determine_rate, 638 .set_rate = clk_rcg_bypass_set_rate, 639 }; 640 EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops); 641 642 const struct clk_ops clk_rcg_lcc_ops = { 643 .enable = clk_rcg_lcc_enable, 644 .disable = clk_rcg_lcc_disable, 645 .get_parent = clk_rcg_get_parent, 646 .set_parent = clk_rcg_set_parent, 647 .recalc_rate = clk_rcg_recalc_rate, 648 .determine_rate = clk_rcg_determine_rate, 649 .set_rate = clk_rcg_lcc_set_rate, 650 }; 651 EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops); 652 653 const struct clk_ops clk_dyn_rcg_ops = { 654 .enable = clk_enable_regmap, 655 .is_enabled = clk_is_enabled_regmap, 656 .disable = clk_disable_regmap, 657 .get_parent = clk_dyn_rcg_get_parent, 658 .set_parent = clk_dyn_rcg_set_parent, 659 .recalc_rate = clk_dyn_rcg_recalc_rate, 660 .determine_rate = clk_dyn_rcg_determine_rate, 661 .set_rate = clk_dyn_rcg_set_rate, 662 .set_rate_and_parent = clk_dyn_rcg_set_rate_and_parent, 663 }; 664 EXPORT_SYMBOL_GPL(clk_dyn_rcg_ops); 665