// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent. No clk_set_parent support
 */

static inline u32 clk_div_readl(struct clk_divider *divider)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		return ioread32be(divider->reg);

	return readl(divider->reg);
}

static inline void clk_div_writel(struct clk_divider *divider, u32 val)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		iowrite32be(val, divider->reg);
	else
		writel(val, divider->reg);
}

static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
				      u8 width)
{
	unsigned int maxdiv = 0, mask = clk_div_mask(width);
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div > maxdiv && clkt->val <= mask)
			maxdiv = clkt->div;
	return maxdiv;
}

static unsigned int _get_table_mindiv(const struct clk_div_table *table)
{
	unsigned int mindiv = UINT_MAX;
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div < mindiv)
			mindiv = clkt->div;
	return mindiv;
}

static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
				unsigned long flags)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return clk_div_mask(width);
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << clk_div_mask(width);
	if (table)
		return _get_table_maxdiv(table, width);
	return clk_div_mask(width) + 1;
}

static unsigned int _get_table_div(const struct clk_div_table *table,
				   unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;
	return 0;
}

static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return val ? val : clk_div_mask(width) + 1;
	if (table)
		return _get_table_div(table, val);
	return val + 1;
}

static unsigned int _get_table_val(const struct clk_div_table *table,
				   unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;
	return 0;
}

static unsigned int _get_val(const struct clk_div_table *table,
			     unsigned int div, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div == clk_div_mask(width) + 1) ? 0 : div;
	if (table)
		return _get_table_val(table, div);
	return div - 1;
}
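
/*
 * Worked example of the val <-> div mappings implemented by _get_div() and
 * _get_val() above, assuming an 8-bit field unless noted otherwise:
 *
 *   no flags (zero based):            val 3 <-> div 4
 *   CLK_DIVIDER_ONE_BASED:            val 3 <-> div 3
 *   CLK_DIVIDER_POWER_OF_TWO:         val 3 <-> div 8  (1 << val, __ffs(div))
 *   CLK_DIVIDER_MAX_AT_ZERO, width 4: val 0 <-> div 16 (clk_div_mask(4) + 1)
 */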

unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
				  unsigned int val,
				  const struct clk_div_table *table,
				  unsigned long flags, unsigned long width)
{
	unsigned int div;

	div = _get_div(table, val, flags, width);
	if (!div) {
		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
		     clk_hw_get_name(hw));
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);

static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_div_readl(divider) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static bool _is_valid_table_div(const struct clk_div_table *table,
				unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;
	return false;
}

static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
			  unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return is_power_of_2(div);
	if (table)
		return _is_valid_table_div(table, div);
	return true;
}

static int _round_up_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int up = INT_MAX;

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div < div)
			continue;

		if ((clkt->div - div) < (up - div))
			up = clkt->div;
	}

	return up;
}

static int _round_down_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int down = _get_table_mindiv(table);

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div > div)
			continue;

		if ((div - clkt->div) < (div - down))
			down = clkt->div;
	}

	return down;
}

static int _div_round_up(const struct clk_div_table *table,
			 unsigned long parent_rate, unsigned long rate,
			 unsigned long flags)
{
	int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __roundup_pow_of_two(div);
	if (table)
		div = _round_up_table(table, div);

	return div;
}

static int _div_round_closest(const struct clk_div_table *table,
			      unsigned long parent_rate, unsigned long rate,
			      unsigned long flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

	up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
	down = parent_rate / rate;

	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);
	} else if (table) {
		up = _round_up_table(table, up);
		down = _round_down_table(table, down);
	}

	up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
	down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}
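
/*
 * Worked example for _div_round_closest(): with parent_rate = 400 MHz and
 * rate = 150 MHz (no table, no power-of-two restriction), up = 3 and
 * down = 2, giving up_rate = 133333334 Hz and down_rate = 200000000 Hz.
 * Since 150 MHz - up_rate (~16.7 MHz) <= down_rate - 150 MHz (50 MHz),
 * the divider 3 is chosen.
 */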

static int _div_round(const struct clk_div_table *table,
		      unsigned long parent_rate, unsigned long rate,
		      unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return _div_round_closest(table, parent_rate, rate, flags);

	return _div_round_up(table, parent_rate, rate, flags);
}

static bool _is_best_div(unsigned long rate, unsigned long now,
			 unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static int _next_div(const struct clk_div_table *table, int div,
		     unsigned long flags)
{
	div++;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __roundup_pow_of_two(div);
	if (table)
		return _round_up_table(table, div);

	return div;
}

static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate,
			       unsigned long *best_parent_rate,
			       const struct clk_div_table *table, u8 width,
			       unsigned long flags)
{
	int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = _get_maxdiv(table, width, flags);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = _div_round(table, parent_rate, rate, flags);
		bestdiv = bestdiv == 0 ? 1 : bestdiv;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = _next_div(table, 0, flags); i <= maxdiv;
	     i = _next_div(table, i, flags)) {
		if (rate * i == parent_rate_saved) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(parent, rate * i);
		now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
		if (_is_best_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = _get_maxdiv(table, width, flags);
		*best_parent_rate = clk_hw_round_rate(parent, 1);
	}

	return bestdiv;
}
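
/*
 * Worked example for the search above: with CLK_SET_RATE_PARENT set, a
 * cached *best_parent_rate of 48 MHz and a requested rate of 12 MHz, the
 * loop hits 12 MHz * 4 == 48 MHz at i = 4 and returns that divider
 * immediately, without asking the parent to change its rate.
 */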

int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
			   const struct clk_div_table *table, u8 width,
			   unsigned long flags)
{
	int div;

	div = clk_divider_bestdiv(hw, req->best_parent_hw, req->rate,
				  &req->best_parent_rate, table, width, flags);

	req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);

	return 0;
}
EXPORT_SYMBOL_GPL(divider_determine_rate);

int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
			      const struct clk_div_table *table, u8 width,
			      unsigned long flags, unsigned int val)
{
	int div;

	div = _get_div(table, val, flags, width);

	/* Even a read-only clock can propagate a rate change */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		if (!req->best_parent_hw)
			return -EINVAL;

		req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
							  req->rate * div);
	}

	req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);

	return 0;
}
EXPORT_SYMBOL_GPL(divider_ro_determine_rate);
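
/*
 * Worked example for divider_ro_determine_rate(): with the divider field
 * fixed at div = 4 and CLK_SET_RATE_PARENT set, a request for 100 MHz is
 * translated into a parent request of clk_hw_round_rate(parent, 400 MHz);
 * req->rate is then recomputed from whatever parent rate was granted.
 */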

long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate, unsigned long *prate,
			       const struct clk_div_table *table,
			       u8 width, unsigned long flags)
{
	struct clk_rate_request req;
	int ret;

	clk_hw_init_rate_request(hw, &req, rate);
	req.best_parent_rate = *prate;
	req.best_parent_hw = parent;

	ret = divider_determine_rate(hw, &req, table, width, flags);
	if (ret)
		return ret;

	*prate = req.best_parent_rate;

	return req.rate;
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);

long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
				  unsigned long rate, unsigned long *prate,
				  const struct clk_div_table *table, u8 width,
				  unsigned long flags, unsigned int val)
{
	struct clk_rate_request req;
	int ret;

	clk_hw_init_rate_request(hw, &req, rate);
	req.best_parent_rate = *prate;
	req.best_parent_hw = parent;

	ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
	if (ret)
		return ret;

	*prate = req.best_parent_rate;

	return req.rate;
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);

static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int clk_divider_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_determine_rate(hw, req, divider->table,
						 divider->width,
						 divider->flags, val);
	}

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

int divider_get_val(unsigned long rate, unsigned long parent_rate,
		    const struct clk_div_table *table, u8 width,
		    unsigned long flags)
{
	unsigned int div, value;

	div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -EINVAL;

	value = _get_val(table, div, flags, width);

	return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);

static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = clk_div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_div_readl(divider);
		val &= ~(clk_div_mask(divider->width) << divider->shift);
	}
	val |= (u32)value << divider->shift;
	clk_div_writel(divider, val);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}
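
/*
 * Worked example for the CLK_DIVIDER_HIWORD_MASK path above: with shift = 4,
 * width = 2 and a new field value of 1, the register is written with
 * (0x3 << 20) | (1 << 4) = 0x00300010; the upper half-word acts as a write
 * mask for the corresponding low bits, so no read-modify-write is needed.
 */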

const struct clk_ops clk_divider_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.determine_rate = clk_divider_determine_rate,
	.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);

const struct clk_ops clk_divider_ro_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);

struct clk_hw *__clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret;

	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
		if (width + shift > 16) {
			pr_warn("divider value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else
		init.ops = &clk_divider_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_divider assignments */
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	/* register the clock */
	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_divider);

/**
 * clk_register_divider_table - register a table based divider clock with
 * the clock framework
 * @dev: device registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @reg: register address to adjust divider
 * @shift: number of bits to shift the bitfield
 * @width: width of the bitfield
 * @clk_divider_flags: divider-specific flags for this clock
 * @table: array of divider/value pairs ending with a div set to 0
 * @lock: shared register lock for this clock
 */
struct clk *clk_register_divider_table(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_divider_flags, const struct clk_div_table *table,
		spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
			NULL, flags, reg, shift, width, clk_divider_flags,
			table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);

void clk_unregister_divider(struct clk *clk)
{
	struct clk_divider *div;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	div = to_clk_divider(hw);

	clk_unregister(clk);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);

/**
 * clk_hw_unregister_divider - unregister a clk divider
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister_divider(struct clk_hw *hw)
{
	struct clk_divider *div;

	div = to_clk_divider(hw);

	clk_hw_unregister(hw);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);

static void devm_clk_hw_release_divider(struct device *dev, void *res)
{
	clk_hw_unregister_divider(*(struct clk_hw **)res);
}

struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
				       parent_data, flags, reg, shift, width,
				       clk_divider_flags, table, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);
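
/*
 * Usage sketch (illustrative only, kept disabled): how a platform driver
 * might register a table based divider through the API above. The "foo"
 * names, the register offset and the table entries are hypothetical.
 */
#if 0
static const struct clk_div_table foo_div_table[] = {
	{ .val = 0, .div = 2 },
	{ .val = 1, .div = 4 },
	{ .val = 2, .div = 8 },
	{ }	/* sentinel: .div == 0 terminates the table */
};

static DEFINE_SPINLOCK(foo_div_lock);

static struct clk *foo_register_div(struct device *dev, void __iomem *base)
{
	/* divider field in bits [5:4] of the register at offset 0x10 */
	return clk_register_divider_table(dev, "foo_div", "foo_parent", 0,
					  base + 0x10, 4, 2, 0,
					  foo_div_table, &foo_div_lock);
}
#endif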