// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
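/*
 * Worked example for calc_rate() above (values chosen purely for
 * illustration): the HID divider field encodes a divide-by-((hid_div + 1) / 2)
 * in half-steps, so with a 19.2 MHz parent, hid_div = 3, and M/N mode with
 * m = 1, n = 2:
 *
 *   rate = 19200000 * 2 / (3 + 1) = 9600000
 *   rate = 9600000 * 1 / 2        = 4800000
 */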
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;

			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
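/*
 * Note on the MND encoding used by __clk_rcg2_configure() below: the
 * hardware takes N and D inverted, i.e. the N register holds ~(n - m) and
 * the D register holds the bitwise NOT of the doubled duty value 2*d, both
 * truncated to mnd_width bits. Illustrative numbers (not from any real
 * frequency plan): with mnd_width = 8, m = 1, n = 3, the N register is
 * written with ~(3 - 1) & 0xff = 0xfd; d is n clamped to
 * [m, 2 * (n - m)] = [1, 4], so d = 3 and the D register gets
 * ~3 & 0xff = 0xfc.
 */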
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
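/*
 * The duty-cycle handlers below decode the same inverted registers: d is
 * recovered as (~not2d & mask) / 2 and n as (~notn_m + m) & mask, giving a
 * duty cycle of d/n. Illustrative numbers (an 8-bit MND assumed): m = 1,
 * n = 4 means the N register reads ~(4 - 1) & 0xff = 0xfc; a D register of
 * ~4 & 0xff = 0xfb decodes to d = 4 / 2 = 2, i.e. a 2/4 = 50% duty cycle.
 */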
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}

static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);

struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};
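/*
 * The eDP pixel ops below pick an m/n entry from the tables above by
 * checking whether rate * den / num lands within +/- 100 kHz (the "delta"
 * below) of the fixed PLL rate. For example, with the 810 MHz table,
 * 148.5 MHz * 60 / 11 = 810 MHz exactly, so { 11, 60 } matches.
 */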
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
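/*
 * The byte clock uses only the HID divider, which divides by
 * (pre_div + 1) / 2 in half-steps, so the code below solves
 * div = ceil(2 * parent_rate / rate) - 1. Illustrative numbers: for a
 * 750 MHz parent and a 187.5 MHz request, div = ceil(1500 / 187.5) - 1 = 7,
 * and calc_rate(750 MHz, 0, 0, 0, 7) = 750 MHz * 2 / 8 = 187.5 MHz.
 */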
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
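/*
 * Display pixel clocks apply one of the fixed m/n ratios below on top of
 * the HID divider. clk_pixel_determine_rate() asks the parent to round
 * rate * den / num and accepts an entry when the result lands within
 * +/- 100 kHz of the request, so the achieved rate is src_rate * num / den.
 */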
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
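/*
 * Illustrative ping-pong sequence for clk_gfx3d_determine_rate() below
 * (PLL roles assumed, not taken from any particular SoC): with p0 a
 * fixed-rate PLL and p1/p2 two variable-rate PLLs, a rate change while
 * running from p1 is done by reprogramming and switching to p2, and vice
 * versa, so the PLL currently clocking the GPU is never reprogrammed
 * in place.
 */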
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function ping-pongs the RCG between PLLs: if we don't have
	 * at least one fixed PLL and two variable ones, it's not going to
	 * work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
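/*
 * CMD_ROOT_EN below force-enables the RCG root independently of the
 * hardware feedback votes from downstream branch clocks (see the comment
 * in clk_rcg2_shared_init() further down). The shared-RCG code brackets
 * reconfiguration with set/clear of this bit so the root is guaranteed to
 * be running while CFG/M/N/D are rewritten.
 */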
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
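/*
 * parked_cfg lifecycle for the shared ops below: set_rate() on a disabled
 * clk programs M/N/D but only caches the would-be CFG value in
 * rcg->parked_cfg; enable() writes the cached CFG back and triggers the
 * CMD_UPDATE handshake; disable() saves the live CFG into parked_cfg
 * before parking the RCG on the safe source.
 */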
static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long parent_rate,
				      enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	/*
	 * In case the clock is disabled, update the M, N and D registers,
	 * cache the CFG value in parked_cfg and don't hit the update bit of
	 * the CMD register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
					  unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because the required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store the current configuration, as switching to the safe source
	 * would clear the SRC and DIV of the CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while the application processor
	 * subsystem is online. Therefore, the RCG can safely switch its
	 * parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}

static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);

	return clk_rcg2_get_parent(hw);
}

static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked only update the cached cfg */
	if (!clk_hw_is_enabled(hw)) {
		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

		return 0;
	}

	return clk_rcg2_set_parent(hw, index);
}

static unsigned long
clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);

	return clk_rcg2_recalc_rate(hw, parent_rate);
}

static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on.
	 *     A GDSC can wedge if it is turned on and the RCG is stuck on
	 *     because the GDSC's controller will hang waiting for the clk
	 *     status to toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	clk_rcg2_shared_disable(hw);

	return 0;
}
const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

const struct clk_ops clk_rcg2_shared_floor_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_shared_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);

static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Read the config register so that the parent is properly mapped at
	 * registration time.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	return 0;
}

/*
 * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
 * unchanged at registration time.
 */
const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
	.init = clk_rcg2_shared_no_init_park,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
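/*
 * DFS lets the consumer switch the RCG between pre-programmed performance
 * levels by writing a register in its own I/O region (see the comment in
 * clk_rcg2_enable_dfs() below). Per the offsets defined at the top of this
 * file, SE_CMD_DFSR (0x14) holds the DFS enable bit and, as decoded in
 * clk_rcg2_dfs_recalc_rate(), the current perf level in bits [4:1]; each
 * level has mirror CFG/M/N registers at 0x1c/0x5c/0x9c + 4 * level.
 */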
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the rate of the parent, because
	 * we can't do any better at figuring it out when the table hasn't
	 * been populated yet. We only populate the table in determine_rate
	 * because we can't guarantee the parents will be registered with the
	 * framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
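/*
 * Worked example for the DP M/N selection below (link rate chosen for
 * illustration): for an 810 MHz parent and a 148.5 MHz request,
 * rational_best_approximation() reduces 810000000/148500000 to
 * den/num = 60/11, so f.m = 11, f.n = 60 and the resulting rate is
 * 810 MHz * 11 / 60 = 148.5 MHz.
 */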
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}

const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);