// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
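/*
 * Worked example (illustrative values, not from a real frequency table):
 * with parent_rate = 19200000, a CFG divider field of 3 (the field
 * encodes 2 * div - 1, so 3 means divide-by-2) and M/N disabled,
 * calc_rate() returns 19200000 * 2 / (3 + 1) = 9600000. Enabling M/N
 * mode with m = 1, n = 2 would halve that again to 4800000.
 */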
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;

			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
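/*
 * Policy example (hypothetical table, for illustration only): given a
 * frequency table containing 19200000, 50000000 and 100000000, a request
 * for 60000000 resolves to 100000000 under the CEIL policy (smallest
 * entry >= the request) and to 50000000 under the FLOOR policy (largest
 * entry <= the request).
 */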
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
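/*
 * Register-encoding example (illustrative values): for an 8-bit MND
 * counter with f->m = 1 and f->n = 3, the M register is written with 1,
 * the N register with ~(3 - 1) & 0xff = 0xfd, and the D register with
 * ~d & 0xff = 0xfc, where d = clamp(n, m, 2 * (n - m)) = 3.
 * __clk_rcg2_recalc_rate() reverses the N encoding with
 * n = (~0xfd & 0xff) + m = 3.
 */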
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
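/*
 * Readback example (illustrative values): continuing the m = 1, n = 3
 * encoding above, the D register holds not2d = 0xfc, so
 * d = DIV_ROUND_CLOSEST(~0xfc & 0xff, 2) = 2, and
 * n = (~0xfd + 1) & 0xff = 3, giving a reported duty cycle of 2/3.
 */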
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);

struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};
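/*
 * Matching example (from the 675 MHz table above): a request for
 * 148500000 checked against the { 11, 50 } entry gives
 * request = 148500000 * 50 / 11 = 675000000, which falls within the
 * +/- 100 kHz delta of the 675 MHz source, so m = 11 and n = 50 are
 * programmed and the output is 675 MHz * 11 / 50 = 148.5 MHz (assuming
 * the HID divider is left in bypass).
 */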
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
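/*
 * Divider example (illustrative values): for a 750 MHz parent and a
 * 187.5 MHz byte clock request,
 * div = DIV_ROUND_UP(2 * 750000000, 187500000) - 1 = 7, and
 * calc_rate(750000000, 0, 0, 0, 7) = 750000000 * 2 / (7 + 1) =
 * 187500000, i.e. an exact divide-by-4.
 */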
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};
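/*
 * Matching example (illustrative values): a 108 MHz pixel rate checked
 * against the { 2, 3 } entry yields request = 108000000 * 3 / 2 =
 * 162000000; if the parent can run within 100 kHz of 162 MHz, m = 2 and
 * n = 3 are programmed and the output is parent * 2 / 3.
 */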
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function ping-pongs the RCG between PLLs: without at least
	 * one fixed PLL and two variable ones, it's not going to work
	 * correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
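/*
 * Ping-pong walkthrough (illustrative): with p0 fixed and p1/p2
 * variable, a request that matches neither the XO rate nor p0's rate
 * switches away from the currently used variable PLL (p1 to p2, or p2
 * to p1), so the newly selected PLL can be reprogrammed while the GPU
 * keeps running from the old one; the actual mux switch then happens in
 * clk_gfx3d_set_rate_and_parent() below.
 */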
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
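/*
 * Sequencing note (summary of the helpers above): a shared RCG is
 * reconfigured with the CMD_ROOT_EN force-enable bit held: set the bit,
 * poll CMD_ROOT_OFF until the root is running, write the new
 * configuration, hit CMD_UPDATE, then drop CMD_ROOT_EN so the hardware
 * feedback from the branch clocks controls the root again.
 */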
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case the clock is disabled, update the M, N and D registers,
	 * cache the CFG value in parked_cfg and don't hit the update bit
	 * of the CMD register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because the required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store the current configuration, as switching to the safe source
	 * would clear the SRC and DIV of the CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while the application processor
	 * subsystem is online. Therefore, the RCG can safely switch its
	 * parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}

static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);

	return clk_rcg2_get_parent(hw);
}

static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked only update the cached cfg */
	if (!clk_hw_is_enabled(hw)) {
		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

		return 0;
	}

	return clk_rcg2_set_parent(hw, index);
}

static unsigned long
clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);

	return clk_rcg2_recalc_rate(hw, parent_rate);
}

static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on.
	 *     A GDSC can wedge if it is turned on and the RCG is stuck on,
	 *     because the GDSC's controller will hang waiting for the clk
	 *     status to toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	clk_rcg2_shared_disable(hw);

	return 0;
}

const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
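/*
 * Table-population example (illustrative values): for perf level 2 the
 * registers at SE_PERF_DFSR(2) = cmd_rcgr + 0x24, SE_PERF_M_DFSR(2) =
 * cmd_rcgr + 0x64 and SE_PERF_N_DFSR(2) = cmd_rcgr + 0xa4 are decoded
 * exactly like the CFG/M/N registers above, and the resulting freq_tbl
 * entry gets f->freq = calc_rate(parent rate, m, n, mode, pre_div).
 */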
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);

static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}
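/*
 * Approximation example (illustrative values): for a fixed 810 MHz PHY
 * link rate and a 162 MHz pixel request, rational_best_approximation()
 * reduces 810000000/162000000 to den = 5, num = 1, so f.m = 1 and
 * f.n = 5 produce exactly 810 MHz / 5 = 162 MHz.
 */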
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);