/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}
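
/*
 * Illustrative sketch (not part of the original file): a provider with a
 * hardware prepare-status bit could back .is_prepared with a register read;
 * foo_clk, to_foo_clk(), FOO_STATUS and FOO_READY are hypothetical names:
 *
 *	static int foo_is_prepared(struct clk_hw *hw)
 *	{
 *		struct foo_clk *foo = to_foo_clk(hw);
 *
 *		return !!(readl(foo->base + FOO_STATUS) & FOO_READY);
 *	}
 *
 * Providers without such feedback can simply omit the callback and rely on
 * the software usage counter, as clk_core_is_prepared() above does.
 */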

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if the clock controller's device is runtime active before
	 * calling the .is_enabled callback. If not, assume that the clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of the controller before
	 * taking the enable spinlock, but the below check is needed if one
	 * tries to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}

/***    helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
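
/*
 * Illustrative sketch: a straightforward mux provider can typically plug the
 * generic helper below straight into its clk_ops; foo_mux_get_parent and
 * foo_mux_set_parent are hypothetical callbacks:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */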

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain are also placed under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
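
/*
 * Illustrative sketch of the consumer-side pairing (error handling trimmed):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	... critical section: no other consumer may change the rate ...
 *
 *	clk_rate_exclusive_put(clk);
 */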

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain are also placed under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);
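
/*
 * Illustrative sketch: gating a clock completely mirrors the enable-side
 * ordering, atomic part first, sleepable part second:
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */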

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection.
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
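
/*
 * Illustrative sketch: a consumer normally pairs the sleepable and the atomic
 * halves of the API, for example in a driver's power-up path:
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 */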

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. Call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}
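
/*
 * Booting with "clk_ignore_unused" on the kernel command line sets the flag
 * below, which makes the late initcall further down skip the
 * disable/unprepare sweep of unused clocks.
 */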

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled:
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	if (core->ops->determine_rate || core->ops->round_rate)
		return true;

	return false;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}
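
/*
 * Illustrative sketch: as clk_core_determine_round_nolock() above shows,
 * .determine_rate is preferred over .round_rate when a provider implements
 * both. A basic rate-adjustable provider might expose (all foo_* names are
 * hypothetical):
 *
 *	static const struct clk_ops foo_div_ops = {
 *		.recalc_rate	= foo_div_recalc_rate,
 *		.round_rate	= foo_div_round_rate,
 *		.set_rate	= foo_div_set_rate,
 *	};
 */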

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
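
/*
 * Illustrative sketch: consumers commonly query the achievable rate first and
 * then request it (48 MHz is an arbitrary example value):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */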

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means an accuracy
 * recalculation will be issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++)
		if (clk_core_get_parent_by_index(core, i) == parent)
			return i;

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * Walk down a subtree and set the new rates, notifying of the rate change
 * on the way.
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section.
 *
 * This can be used initially to ensure that at least 1 consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is deliberately omitted here:
	 * this function is meant to be used instead of clk_rate_protect,
	 * so the clock provider gets protected before the consumer code
	 * path runs.
	 */
	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in a bit of trouble here: the current rate is
		 * outside the requested range. We are going to try to
		 * request an appropriate range boundary, but there is a
		 * catch. It may fail for the usual reason (clock broken,
		 * clock protected, etc) but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
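
/*
 * Illustrative sketch: constraining a clock to a 100-200 MHz window (the
 * boundaries are arbitrary example values):
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * clk_set_min_rate() and clk_set_max_rate() below adjust only one of the two
 * boundaries.
 */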

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}
*/ 2191 if (!clk || !parent) 2192 return true; 2193 2194 core = clk->core; 2195 parent_core = parent->core; 2196 2197 /* Optimize for the case where the parent is already the parent. */ 2198 if (core->parent == parent_core) 2199 return true; 2200 2201 return match_string(core->parent_names, core->num_parents, 2202 parent_core->name) >= 0; 2203 } 2204 EXPORT_SYMBOL_GPL(clk_has_parent); 2205 2206 static int clk_core_set_parent_nolock(struct clk_core *core, 2207 struct clk_core *parent) 2208 { 2209 int ret = 0; 2210 int p_index = 0; 2211 unsigned long p_rate = 0; 2212 2213 lockdep_assert_held(&prepare_lock); 2214 2215 if (!core) 2216 return 0; 2217 2218 if (core->parent == parent) 2219 return 0; 2220 2221 /* verify ops for multi-parent clks */ 2222 if (core->num_parents > 1 && !core->ops->set_parent) 2223 return -EPERM; 2224 2225 /* check that we are allowed to re-parent if the clock is in use */ 2226 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2227 return -EBUSY; 2228 2229 if (clk_core_rate_is_protected(core)) 2230 return -EBUSY; 2231 2232 /* try finding the new parent index */ 2233 if (parent) { 2234 p_index = clk_fetch_parent_index(core, parent); 2235 if (p_index < 0) { 2236 pr_debug("%s: clk %s can not be parent of clk %s\n", 2237 __func__, parent->name, core->name); 2238 return p_index; 2239 } 2240 p_rate = parent->rate; 2241 } 2242 2243 ret = clk_pm_runtime_get(core); 2244 if (ret) 2245 return ret; 2246 2247 /* propagate PRE_RATE_CHANGE notifications */ 2248 ret = __clk_speculate_rates(core, p_rate); 2249 2250 /* abort if a driver objects */ 2251 if (ret & NOTIFY_STOP_MASK) 2252 goto runtime_put; 2253 2254 /* do the re-parent */ 2255 ret = __clk_set_parent(core, parent, p_index); 2256 2257 /* propagate rate and accuracy recalculation accordingly */ 2258 if (ret) { 2259 __clk_recalc_rates(core, ABORT_RATE_CHANGE); 2260 } else { 2261 __clk_recalc_rates(core, POST_RATE_CHANGE); 2262 __clk_recalc_accuracies(core); 2263 } 2264 2265 runtime_put: 2266 clk_pm_runtime_put(core); 2267 2268 return ret; 2269 } 2270 2271 /** 2272 * clk_set_parent - switch the parent of a mux clk 2273 * @clk: the mux clk whose input we are switching 2274 * @parent: the new input to clk 2275 * 2276 * Re-parent clk to use parent as its new input source. If clk is in the 2277 * prepared state, the clk will get enabled for the duration of this call. If 2278 * that's not acceptable for a specific clk (e.g. the consumer can't handle 2279 * that, the reparenting is glitchy in hardware, etc.), use the 2280 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2281 * 2282 * After successfully changing clk's parent, clk_set_parent will update the 2283 * clk topology, sysfs topology and propagate rate recalculation via 2284 * __clk_recalc_rates. 2285 * 2286 * Returns 0 on success, a negative errno otherwise. 2287 */ 2288 int clk_set_parent(struct clk *clk, struct clk *parent) 2289 { 2290 int ret; 2291 2292 if (!clk) 2293 return 0; 2294 2295 clk_prepare_lock(); 2296 2297 if (clk->exclusive_count) 2298 clk_core_rate_unprotect(clk->core); 2299 2300 ret = clk_core_set_parent_nolock(clk->core, 2301 parent ?
parent->core : NULL); 2302 2303 if (clk->exclusive_count) 2304 clk_core_rate_protect(clk->core); 2305 2306 clk_prepare_unlock(); 2307 2308 return ret; 2309 } 2310 EXPORT_SYMBOL_GPL(clk_set_parent); 2311 2312 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2313 { 2314 int ret = -EINVAL; 2315 2316 lockdep_assert_held(&prepare_lock); 2317 2318 if (!core) 2319 return 0; 2320 2321 if (clk_core_rate_is_protected(core)) 2322 return -EBUSY; 2323 2324 trace_clk_set_phase(core, degrees); 2325 2326 if (core->ops->set_phase) { 2327 ret = core->ops->set_phase(core->hw, degrees); 2328 if (!ret) 2329 core->phase = degrees; 2330 } 2331 2332 trace_clk_set_phase_complete(core, degrees); 2333 2334 return ret; 2335 } 2336 2337 /** 2338 * clk_set_phase - adjust the phase shift of a clock signal 2339 * @clk: clock signal source 2340 * @degrees: number of degrees the signal is shifted 2341 * 2342 * Shifts the phase of a clock signal by the specified 2343 * degrees. Returns 0 on success, a negative errno otherwise. 2344 * 2345 * This function makes no distinction about the input or reference 2346 * signal that we adjust the clock signal phase against. For example, with 2347 * phase-locked loop clock signal generators we may shift phase with 2348 * respect to the feedback clock signal input, but in other cases the 2349 * clock phase may be shifted with respect to some other, unspecified 2350 * signal. 2351 * 2352 * Additionally the concept of phase shift does not propagate through 2353 * the clock tree hierarchy, which sets it apart from clock rates and 2354 * clock accuracy. A parent clock phase attribute does not have an 2355 * impact on the phase attribute of a child clock. 2356 */ 2357 int clk_set_phase(struct clk *clk, int degrees) 2358 { 2359 int ret; 2360 2361 if (!clk) 2362 return 0; 2363 2364 /* sanity check degrees */ 2365 degrees %= 360; 2366 if (degrees < 0) 2367 degrees += 360; 2368 2369 clk_prepare_lock(); 2370 2371 if (clk->exclusive_count) 2372 clk_core_rate_unprotect(clk->core); 2373 2374 ret = clk_core_set_phase_nolock(clk->core, degrees); 2375 2376 if (clk->exclusive_count) 2377 clk_core_rate_protect(clk->core); 2378 2379 clk_prepare_unlock(); 2380 2381 return ret; 2382 } 2383 EXPORT_SYMBOL_GPL(clk_set_phase); 2384 2385 static int clk_core_get_phase(struct clk_core *core) 2386 { 2387 int ret; 2388 2389 clk_prepare_lock(); 2390 /* Always try to update cached phase if possible */ 2391 if (core->ops->get_phase) 2392 core->phase = core->ops->get_phase(core->hw); 2393 ret = core->phase; 2394 clk_prepare_unlock(); 2395 2396 return ret; 2397 } 2398 2399 /** 2400 * clk_get_phase - return the phase shift of a clock signal 2401 * @clk: clock signal source 2402 * 2403 * Returns the phase shift of a clock node in degrees, otherwise returns 2404 * a negative errno.
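 *
 * A minimal consumer-side sketch (illustrative only; the clock handle
 * and the 90-degree shift are assumptions, not taken from this file):
 *
 *	ret = clk_set_phase(clk, 90);
 *	if (!ret)
 *		pr_info("phase is now %d degrees\n", clk_get_phase(clk));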
2405 */ 2406 int clk_get_phase(struct clk *clk) 2407 { 2408 if (!clk) 2409 return 0; 2410 2411 return clk_core_get_phase(clk->core); 2412 } 2413 EXPORT_SYMBOL_GPL(clk_get_phase); 2414 2415 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 2416 { 2417 /* Assume a default value of 50% */ 2418 core->duty.num = 1; 2419 core->duty.den = 2; 2420 } 2421 2422 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 2423 2424 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 2425 { 2426 struct clk_duty *duty = &core->duty; 2427 int ret = 0; 2428 2429 if (!core->ops->get_duty_cycle) 2430 return clk_core_update_duty_cycle_parent_nolock(core); 2431 2432 ret = core->ops->get_duty_cycle(core->hw, duty); 2433 if (ret) 2434 goto reset; 2435 2436 /* Don't trust the clock provider too much */ 2437 if (duty->den == 0 || duty->num > duty->den) { 2438 ret = -EINVAL; 2439 goto reset; 2440 } 2441 2442 return 0; 2443 2444 reset: 2445 clk_core_reset_duty_cycle_nolock(core); 2446 return ret; 2447 } 2448 2449 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 2450 { 2451 int ret = 0; 2452 2453 if (core->parent && 2454 core->flags & CLK_DUTY_CYCLE_PARENT) { 2455 ret = clk_core_update_duty_cycle_nolock(core->parent); 2456 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2457 } else { 2458 clk_core_reset_duty_cycle_nolock(core); 2459 } 2460 2461 return ret; 2462 } 2463 2464 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2465 struct clk_duty *duty); 2466 2467 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 2468 struct clk_duty *duty) 2469 { 2470 int ret; 2471 2472 lockdep_assert_held(&prepare_lock); 2473 2474 if (clk_core_rate_is_protected(core)) 2475 return -EBUSY; 2476 2477 trace_clk_set_duty_cycle(core, duty); 2478 2479 if (!core->ops->set_duty_cycle) 2480 return clk_core_set_duty_cycle_parent_nolock(core, duty); 2481 2482 ret = core->ops->set_duty_cycle(core->hw, duty); 2483 if (!ret) 2484 memcpy(&core->duty, duty, sizeof(*duty)); 2485 2486 trace_clk_set_duty_cycle_complete(core, duty); 2487 2488 return ret; 2489 } 2490 2491 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2492 struct clk_duty *duty) 2493 { 2494 int ret = 0; 2495 2496 if (core->parent && 2497 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 2498 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 2499 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2500 } 2501 2502 return ret; 2503 } 2504 2505 /** 2506 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 2507 * @clk: clock signal source 2508 * @num: numerator of the duty cycle ratio to be applied 2509 * @den: denominator of the duty cycle ratio to be applied 2510 * 2511 * Apply the duty cycle ratio if the ratio is valid and the clock can 2512 * perform this operation. 2513 * 2514 * Returns 0 on success, a negative errno otherwise.
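 *
 * A minimal usage sketch (hypothetical consumer clock, not from this
 * file): request a 1/3 ratio, then read it back scaled to percent:
 *
 *	ret = clk_set_duty_cycle(clk, 1, 3);
 *	if (!ret)
 *		pct = clk_get_scaled_duty_cycle(clk, 100);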
2515 */ 2516 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 2517 { 2518 int ret; 2519 struct clk_duty duty; 2520 2521 if (!clk) 2522 return 0; 2523 2524 /* sanity check the ratio */ 2525 if (den == 0 || num > den) 2526 return -EINVAL; 2527 2528 duty.num = num; 2529 duty.den = den; 2530 2531 clk_prepare_lock(); 2532 2533 if (clk->exclusive_count) 2534 clk_core_rate_unprotect(clk->core); 2535 2536 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 2537 2538 if (clk->exclusive_count) 2539 clk_core_rate_protect(clk->core); 2540 2541 clk_prepare_unlock(); 2542 2543 return ret; 2544 } 2545 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 2546 2547 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 2548 unsigned int scale) 2549 { 2550 struct clk_duty *duty = &core->duty; 2551 int ret; 2552 2553 clk_prepare_lock(); 2554 2555 ret = clk_core_update_duty_cycle_nolock(core); 2556 if (!ret) 2557 ret = mult_frac(scale, duty->num, duty->den); 2558 2559 clk_prepare_unlock(); 2560 2561 return ret; 2562 } 2563 2564 /** 2565 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 2566 * @clk: clock signal source 2567 * @scale: scaling factor to be applied to represent the ratio as an integer 2568 * 2569 * Returns the duty cycle ratio of a clock node multiplied by the provided 2570 * scaling factor, or negative errno on error. 2571 */ 2572 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 2573 { 2574 if (!clk) 2575 return 0; 2576 2577 return clk_core_get_scaled_duty_cycle(clk->core, scale); 2578 } 2579 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 2580 2581 /** 2582 * clk_is_match - check if two clk's point to the same hardware clock 2583 * @p: clk compared against q 2584 * @q: clk compared against p 2585 * 2586 * Returns true if the two struct clk pointers both point to the same hardware 2587 * clock node. Put differently, returns true if struct clk *p and struct clk *q 2588 * share the same struct clk_core object. 2589 * 2590 * Returns false otherwise. Note that two NULL clks are treated as matching. 2591 */ 2592 bool clk_is_match(const struct clk *p, const struct clk *q) 2593 { 2594 /* trivial case: identical struct clk's or both NULL */ 2595 if (p == q) 2596 return true; 2597 2598 /* true if clk->core pointers match. 
Avoid dereferencing garbage */ 2599 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 2600 if (p->core == q->core) 2601 return true; 2602 2603 return false; 2604 } 2605 EXPORT_SYMBOL_GPL(clk_is_match); 2606 2607 /*** debugfs support ***/ 2608 2609 #ifdef CONFIG_DEBUG_FS 2610 #include <linux/debugfs.h> 2611 2612 static struct dentry *rootdir; 2613 static int inited = 0; 2614 static DEFINE_MUTEX(clk_debug_lock); 2615 static HLIST_HEAD(clk_debug_list); 2616 2617 static struct hlist_head *all_lists[] = { 2618 &clk_root_list, 2619 &clk_orphan_list, 2620 NULL, 2621 }; 2622 2623 static struct hlist_head *orphan_list[] = { 2624 &clk_orphan_list, 2625 NULL, 2626 }; 2627 2628 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 2629 int level) 2630 { 2631 if (!c) 2632 return; 2633 2634 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", 2635 level * 3 + 1, "", 2636 30 - level * 3, c->name, 2637 c->enable_count, c->prepare_count, c->protect_count, 2638 clk_core_get_rate(c), clk_core_get_accuracy(c), 2639 clk_core_get_phase(c), 2640 clk_core_get_scaled_duty_cycle(c, 100000)); 2641 } 2642 2643 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 2644 int level) 2645 { 2646 struct clk_core *child; 2647 2648 if (!c) 2649 return; 2650 2651 clk_summary_show_one(s, c, level); 2652 2653 hlist_for_each_entry(child, &c->children, child_node) 2654 clk_summary_show_subtree(s, child, level + 1); 2655 } 2656 2657 static int clk_summary_show(struct seq_file *s, void *data) 2658 { 2659 struct clk_core *c; 2660 struct hlist_head **lists = (struct hlist_head **)s->private; 2661 2662 seq_puts(s, " enable prepare protect duty\n"); 2663 seq_puts(s, " clock count count count rate accuracy phase cycle\n"); 2664 seq_puts(s, "---------------------------------------------------------------------------------------------\n"); 2665 2666 clk_prepare_lock(); 2667 2668 for (; *lists; lists++) 2669 hlist_for_each_entry(c, *lists, child_node) 2670 clk_summary_show_subtree(s, c, 0); 2671 2672 clk_prepare_unlock(); 2673 2674 return 0; 2675 } 2676 DEFINE_SHOW_ATTRIBUTE(clk_summary); 2677 2678 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 2679 { 2680 if (!c) 2681 return; 2682 2683 /* This should be JSON format, i.e. 
elements separated with a comma */ 2684 seq_printf(s, "\"%s\": { ", c->name); 2685 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 2686 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 2687 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 2688 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2689 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2690 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); 2691 seq_printf(s, "\"duty_cycle\": %u", 2692 clk_core_get_scaled_duty_cycle(c, 100000)); 2693 } 2694 2695 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 2696 { 2697 struct clk_core *child; 2698 2699 if (!c) 2700 return; 2701 2702 clk_dump_one(s, c, level); 2703 2704 hlist_for_each_entry(child, &c->children, child_node) { 2705 seq_putc(s, ','); 2706 clk_dump_subtree(s, child, level + 1); 2707 } 2708 2709 seq_putc(s, '}'); 2710 } 2711 2712 static int clk_dump_show(struct seq_file *s, void *data) 2713 { 2714 struct clk_core *c; 2715 bool first_node = true; 2716 struct hlist_head **lists = (struct hlist_head **)s->private; 2717 2718 seq_putc(s, '{'); 2719 clk_prepare_lock(); 2720 2721 for (; *lists; lists++) { 2722 hlist_for_each_entry(c, *lists, child_node) { 2723 if (!first_node) 2724 seq_putc(s, ','); 2725 first_node = false; 2726 clk_dump_subtree(s, c, 0); 2727 } 2728 } 2729 2730 clk_prepare_unlock(); 2731 2732 seq_puts(s, "}\n"); 2733 return 0; 2734 } 2735 DEFINE_SHOW_ATTRIBUTE(clk_dump); 2736 2737 static const struct { 2738 unsigned long flag; 2739 const char *name; 2740 } clk_flags[] = { 2741 #define ENTRY(f) { f, #f } 2742 ENTRY(CLK_SET_RATE_GATE), 2743 ENTRY(CLK_SET_PARENT_GATE), 2744 ENTRY(CLK_SET_RATE_PARENT), 2745 ENTRY(CLK_IGNORE_UNUSED), 2746 ENTRY(CLK_IS_BASIC), 2747 ENTRY(CLK_GET_RATE_NOCACHE), 2748 ENTRY(CLK_SET_RATE_NO_REPARENT), 2749 ENTRY(CLK_GET_ACCURACY_NOCACHE), 2750 ENTRY(CLK_RECALC_NEW_RATES), 2751 ENTRY(CLK_SET_RATE_UNGATE), 2752 ENTRY(CLK_IS_CRITICAL), 2753 ENTRY(CLK_OPS_PARENT_ENABLE), 2754 ENTRY(CLK_DUTY_CYCLE_PARENT), 2755 #undef ENTRY 2756 }; 2757 2758 static int clk_flags_show(struct seq_file *s, void *data) 2759 { 2760 struct clk_core *core = s->private; 2761 unsigned long flags = core->flags; 2762 unsigned int i; 2763 2764 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 2765 if (flags & clk_flags[i].flag) { 2766 seq_printf(s, "%s\n", clk_flags[i].name); 2767 flags &= ~clk_flags[i].flag; 2768 } 2769 } 2770 if (flags) { 2771 /* Unknown flags */ 2772 seq_printf(s, "0x%lx\n", flags); 2773 } 2774 2775 return 0; 2776 } 2777 DEFINE_SHOW_ATTRIBUTE(clk_flags); 2778 2779 static int possible_parents_show(struct seq_file *s, void *data) 2780 { 2781 struct clk_core *core = s->private; 2782 int i; 2783 2784 for (i = 0; i < core->num_parents - 1; i++) 2785 seq_printf(s, "%s ", core->parent_names[i]); 2786 2787 seq_printf(s, "%s\n", core->parent_names[i]); 2788 2789 return 0; 2790 } 2791 DEFINE_SHOW_ATTRIBUTE(possible_parents); 2792 2793 static int clk_duty_cycle_show(struct seq_file *s, void *data) 2794 { 2795 struct clk_core *core = s->private; 2796 struct clk_duty *duty = &core->duty; 2797 2798 seq_printf(s, "%u/%u\n", duty->num, duty->den); 2799 2800 return 0; 2801 } 2802 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 2803 2804 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 2805 { 2806 struct dentry *root; 2807 2808 if (!core || !pdentry) 2809 return; 2810 2811 root = debugfs_create_dir(core->name, pdentry); 2812 core->dentry = root; 2813 2814
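/*
 * Expose the cached per-clock state as read-only files in this clock's
 * debugfs directory, e.g. <debugfs>/clk/<name>/clk_rate.
 */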
debugfs_create_ulong("clk_rate", 0444, root, &core->rate); 2815 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 2816 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 2817 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 2818 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 2819 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 2820 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 2821 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 2822 debugfs_create_file("clk_duty_cycle", 0444, root, core, 2823 &clk_duty_cycle_fops); 2824 2825 if (core->num_parents > 1) 2826 debugfs_create_file("clk_possible_parents", 0444, root, core, 2827 &possible_parents_fops); 2828 2829 if (core->ops->debug_init) 2830 core->ops->debug_init(core->hw, core->dentry); 2831 } 2832 2833 /** 2834 * clk_debug_register - add a clk node to the debugfs clk directory 2835 * @core: the clk being added to the debugfs clk directory 2836 * 2837 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 2838 * initialized. Otherwise it bails out early since the debugfs clk directory 2839 * will be created lazily by clk_debug_init as part of a late_initcall. 2840 */ 2841 static void clk_debug_register(struct clk_core *core) 2842 { 2843 mutex_lock(&clk_debug_lock); 2844 hlist_add_head(&core->debug_node, &clk_debug_list); 2845 if (inited) 2846 clk_debug_create_one(core, rootdir); 2847 mutex_unlock(&clk_debug_lock); 2848 } 2849 2850 /** 2851 * clk_debug_unregister - remove a clk node from the debugfs clk directory 2852 * @core: the clk being removed from the debugfs clk directory 2853 * 2854 * Dynamically removes a clk and all its child nodes from the 2855 * debugfs clk directory if clk->dentry points to debugfs created by 2856 * clk_debug_register in __clk_core_init. 2857 */ 2858 static void clk_debug_unregister(struct clk_core *core) 2859 { 2860 mutex_lock(&clk_debug_lock); 2861 hlist_del_init(&core->debug_node); 2862 debugfs_remove_recursive(core->dentry); 2863 core->dentry = NULL; 2864 mutex_unlock(&clk_debug_lock); 2865 } 2866 2867 /** 2868 * clk_debug_init - lazily populate the debugfs clk directory 2869 * 2870 * clks are often initialized very early during boot before memory can be 2871 * dynamically allocated and well before debugfs is setup. This function 2872 * populates the debugfs clk directory once at boot-time when we know that 2873 * debugfs is setup. It should only be called once at boot-time; all other clks 2874 * added dynamically will be registered with clk_debug_register.
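 *
 * Once it has run, the whole tree can be inspected from userspace, for
 * example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/clk/clk_summary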
2875 */ 2876 static int __init clk_debug_init(void) 2877 { 2878 struct clk_core *core; 2879 2880 rootdir = debugfs_create_dir("clk", NULL); 2881 2882 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 2883 &clk_summary_fops); 2884 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 2885 &clk_dump_fops); 2886 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 2887 &clk_summary_fops); 2888 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 2889 &clk_dump_fops); 2890 2891 mutex_lock(&clk_debug_lock); 2892 hlist_for_each_entry(core, &clk_debug_list, debug_node) 2893 clk_debug_create_one(core, rootdir); 2894 2895 inited = 1; 2896 mutex_unlock(&clk_debug_lock); 2897 2898 return 0; 2899 } 2900 late_initcall(clk_debug_init); 2901 #else 2902 static inline void clk_debug_register(struct clk_core *core) { } 2903 static inline void clk_debug_reparent(struct clk_core *core, 2904 struct clk_core *new_parent) 2905 { 2906 } 2907 static inline void clk_debug_unregister(struct clk_core *core) 2908 { 2909 } 2910 #endif 2911 2912 /** 2913 * __clk_core_init - initialize the data structures in a struct clk_core 2914 * @core: clk_core being initialized 2915 * 2916 * Initializes the lists in struct clk_core, queries the hardware for the 2917 * parent and rate and sets them both. 2918 */ 2919 static int __clk_core_init(struct clk_core *core) 2920 { 2921 int i, ret; 2922 struct clk_core *orphan; 2923 struct hlist_node *tmp2; 2924 unsigned long rate; 2925 2926 if (!core) 2927 return -EINVAL; 2928 2929 clk_prepare_lock(); 2930 2931 ret = clk_pm_runtime_get(core); 2932 if (ret) 2933 goto unlock; 2934 2935 /* check to see if a clock with this name is already registered */ 2936 if (clk_core_lookup(core->name)) { 2937 pr_debug("%s: clk %s already initialized\n", 2938 __func__, core->name); 2939 ret = -EEXIST; 2940 goto out; 2941 } 2942 2943 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 2944 if (core->ops->set_rate && 2945 !((core->ops->round_rate || core->ops->determine_rate) && 2946 core->ops->recalc_rate)) { 2947 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 2948 __func__, core->name); 2949 ret = -EINVAL; 2950 goto out; 2951 } 2952 2953 if (core->ops->set_parent && !core->ops->get_parent) { 2954 pr_err("%s: %s must implement .get_parent & .set_parent\n", 2955 __func__, core->name); 2956 ret = -EINVAL; 2957 goto out; 2958 } 2959 2960 if (core->num_parents > 1 && !core->ops->get_parent) { 2961 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 2962 __func__, core->name); 2963 ret = -EINVAL; 2964 goto out; 2965 } 2966 2967 if (core->ops->set_rate_and_parent && 2968 !(core->ops->set_parent && core->ops->set_rate)) { 2969 pr_err("%s: %s must implement .set_parent & .set_rate\n", 2970 __func__, core->name); 2971 ret = -EINVAL; 2972 goto out; 2973 } 2974 2975 /* throw a WARN if any entries in parent_names are NULL */ 2976 for (i = 0; i < core->num_parents; i++) 2977 WARN(!core->parent_names[i], 2978 "%s: invalid NULL in %s's .parent_names\n", 2979 __func__, core->name); 2980 2981 core->parent = __clk_init_parent(core); 2982 2983 /* 2984 * Populate core->parent if parent has already been clk_core_init'd. If 2985 * parent has not yet been clk_core_init'd then place clk in the orphan 2986 * list. If clk doesn't have any parents then place it in the root 2987 * clk list. 
2988 * 2989 * Every time a new clk is clk_init'd then we walk the list of orphan 2990 * clocks and re-parent any that are children of the clock currently 2991 * being clk_init'd. 2992 */ 2993 if (core->parent) { 2994 hlist_add_head(&core->child_node, 2995 &core->parent->children); 2996 core->orphan = core->parent->orphan; 2997 } else if (!core->num_parents) { 2998 hlist_add_head(&core->child_node, &clk_root_list); 2999 core->orphan = false; 3000 } else { 3001 hlist_add_head(&core->child_node, &clk_orphan_list); 3002 core->orphan = true; 3003 } 3004 3005 /* 3006 * optional platform-specific magic 3007 * 3008 * The .init callback is not used by any of the basic clock types, but 3009 * exists for weird hardware that must perform initialization magic. 3010 * Please consider other ways of solving initialization problems before 3011 * using this callback, as its use is discouraged. 3012 */ 3013 if (core->ops->init) 3014 core->ops->init(core->hw); 3015 3016 /* 3017 * Set clk's accuracy. The preferred method is to use 3018 * .recalc_accuracy. For simple clocks and lazy developers the default 3019 * fallback is to use the parent's accuracy. If a clock doesn't have a 3020 * parent (or is orphaned) then accuracy is set to zero (perfect 3021 * clock). 3022 */ 3023 if (core->ops->recalc_accuracy) 3024 core->accuracy = core->ops->recalc_accuracy(core->hw, 3025 __clk_get_accuracy(core->parent)); 3026 else if (core->parent) 3027 core->accuracy = core->parent->accuracy; 3028 else 3029 core->accuracy = 0; 3030 3031 /* 3032 * Set clk's phase. 3033 * Since a phase is by definition relative to its parent, just 3034 * query the current clock phase, or just assume it's in phase. 3035 */ 3036 if (core->ops->get_phase) 3037 core->phase = core->ops->get_phase(core->hw); 3038 else 3039 core->phase = 0; 3040 3041 /* 3042 * Set clk's duty cycle. 3043 */ 3044 clk_core_update_duty_cycle_nolock(core); 3045 3046 /* 3047 * Set clk's rate. The preferred method is to use .recalc_rate. For 3048 * simple clocks and lazy developers the default fallback is to use the 3049 * parent's rate. If a clock doesn't have a parent (or is orphaned) 3050 * then rate is set to zero. 3051 */ 3052 if (core->ops->recalc_rate) 3053 rate = core->ops->recalc_rate(core->hw, 3054 clk_core_get_rate_nolock(core->parent)); 3055 else if (core->parent) 3056 rate = core->parent->rate; 3057 else 3058 rate = 0; 3059 core->rate = core->req_rate = rate; 3060 3061 /* 3062 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 3063 * don't get accidentally disabled when walking the orphan tree and 3064 * reparenting clocks 3065 */ 3066 if (core->flags & CLK_IS_CRITICAL) { 3067 unsigned long flags; 3068 3069 clk_core_prepare(core); 3070 3071 flags = clk_enable_lock(); 3072 clk_core_enable(core); 3073 clk_enable_unlock(flags); 3074 } 3075 3076 /* 3077 * walk the list of orphan clocks and reparent any that newly find a 3078 * parent. 3079 */ 3080 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3081 struct clk_core *parent = __clk_init_parent(orphan); 3082 3083 /* 3084 * We need to use __clk_set_parent_before() and _after() to 3085 * properly migrate any prepare/enable count of the orphan 3086 * clock. This is important for CLK_IS_CRITICAL clocks, which 3087 * are enabled during init but might not have a parent yet.
3088 */ 3089 if (parent) { 3090 /* update the clk tree topology */ 3091 __clk_set_parent_before(orphan, parent); 3092 __clk_set_parent_after(orphan, parent, NULL); 3093 __clk_recalc_accuracies(orphan); 3094 __clk_recalc_rates(orphan, 0); 3095 } 3096 } 3097 3098 kref_init(&core->ref); 3099 out: 3100 clk_pm_runtime_put(core); 3101 unlock: 3102 clk_prepare_unlock(); 3103 3104 if (!ret) 3105 clk_debug_register(core); 3106 3107 return ret; 3108 } 3109 3110 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, 3111 const char *con_id) 3112 { 3113 struct clk *clk; 3114 3115 /* This is to allow this function to be chained to others */ 3116 if (IS_ERR_OR_NULL(hw)) 3117 return ERR_CAST(hw); 3118 3119 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 3120 if (!clk) 3121 return ERR_PTR(-ENOMEM); 3122 3123 clk->core = hw->core; 3124 clk->dev_id = dev_id; 3125 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 3126 clk->max_rate = ULONG_MAX; 3127 3128 clk_prepare_lock(); 3129 hlist_add_head(&clk->clks_node, &hw->core->clks); 3130 clk_prepare_unlock(); 3131 3132 return clk; 3133 } 3134 3135 /* keep in sync with __clk_put */ 3136 void __clk_free_clk(struct clk *clk) 3137 { 3138 clk_prepare_lock(); 3139 hlist_del(&clk->clks_node); 3140 clk_prepare_unlock(); 3141 3142 kfree_const(clk->con_id); 3143 kfree(clk); 3144 } 3145 3146 /** 3147 * clk_register - allocate a new clock, register it and return an opaque cookie 3148 * @dev: device that is registering this clock 3149 * @hw: link to hardware-specific clock data 3150 * 3151 * clk_register is the primary interface for populating the clock tree with new 3152 * clock nodes. It returns a pointer to the newly allocated struct clk which 3153 * cannot be dereferenced by driver code but may be used in conjunction with the 3154 * rest of the clock API. In the event of an error clk_register will return an 3155 * error code; drivers must test for an error code after calling clk_register. 3156 */ 3157 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 3158 { 3159 int i, ret; 3160 struct clk_core *core; 3161 3162 core = kzalloc(sizeof(*core), GFP_KERNEL); 3163 if (!core) { 3164 ret = -ENOMEM; 3165 goto fail_out; 3166 } 3167 3168 core->name = kstrdup_const(hw->init->name, GFP_KERNEL); 3169 if (!core->name) { 3170 ret = -ENOMEM; 3171 goto fail_name; 3172 } 3173 3174 if (WARN_ON(!hw->init->ops)) { 3175 ret = -EINVAL; 3176 goto fail_ops; 3177 } 3178 core->ops = hw->init->ops; 3179 3180 if (dev && pm_runtime_enabled(dev)) 3181 core->dev = dev; 3182 if (dev && dev->driver) 3183 core->owner = dev->driver->owner; 3184 core->hw = hw; 3185 core->flags = hw->init->flags; 3186 core->num_parents = hw->init->num_parents; 3187 core->min_rate = 0; 3188 core->max_rate = ULONG_MAX; 3189 hw->core = core; 3190 3191 /* allocate local copy in case parent_names is __initdata */ 3192 core->parent_names = kcalloc(core->num_parents, sizeof(char *), 3193 GFP_KERNEL); 3194 3195 if (!core->parent_names) { 3196 ret = -ENOMEM; 3197 goto fail_parent_names; 3198 } 3199 3200 3201 /* copy each string name in case parent_names is __initdata */ 3202 for (i = 0; i < core->num_parents; i++) { 3203 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], 3204 GFP_KERNEL); 3205 if (!core->parent_names[i]) { 3206 ret = -ENOMEM; 3207 goto fail_parent_names_copy; 3208 } 3209 } 3210 3211 /* avoid unnecessary string look-ups of clk_core's possible parents. 
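 * The parents array allocated below caches resolved clk_core pointers;
 * it is filled on demand as parents are first looked up by index.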
*/ 3212 core->parents = kcalloc(core->num_parents, sizeof(*core->parents), 3213 GFP_KERNEL); 3214 if (!core->parents) { 3215 ret = -ENOMEM; 3216 goto fail_parents; 3217 } 3218 3219 INIT_HLIST_HEAD(&core->clks); 3220 3221 hw->clk = __clk_create_clk(hw, NULL, NULL); 3222 if (IS_ERR(hw->clk)) { 3223 ret = PTR_ERR(hw->clk); 3224 goto fail_parents; 3225 } 3226 3227 ret = __clk_core_init(core); 3228 if (!ret) 3229 return hw->clk; 3230 3231 __clk_free_clk(hw->clk); 3232 hw->clk = NULL; 3233 3234 fail_parents: 3235 kfree(core->parents); 3236 fail_parent_names_copy: 3237 while (--i >= 0) 3238 kfree_const(core->parent_names[i]); 3239 kfree(core->parent_names); 3240 fail_parent_names: 3241 fail_ops: 3242 kfree_const(core->name); 3243 fail_name: 3244 kfree(core); 3245 fail_out: 3246 return ERR_PTR(ret); 3247 } 3248 EXPORT_SYMBOL_GPL(clk_register); 3249 3250 /** 3251 * clk_hw_register - register a clk_hw and return an error code 3252 * @dev: device that is registering this clock 3253 * @hw: link to hardware-specific clock data 3254 * 3255 * clk_hw_register is the primary interface for populating the clock tree with 3256 * new clock nodes. It returns an integer equal to zero indicating success or 3257 * less than zero indicating failure. Drivers must test for an error code after 3258 * calling clk_hw_register(). 3259 */ 3260 int clk_hw_register(struct device *dev, struct clk_hw *hw) 3261 { 3262 return PTR_ERR_OR_ZERO(clk_register(dev, hw)); 3263 } 3264 EXPORT_SYMBOL_GPL(clk_hw_register); 3265 3266 /* Free memory allocated for a clock. */ 3267 static void __clk_release(struct kref *ref) 3268 { 3269 struct clk_core *core = container_of(ref, struct clk_core, ref); 3270 int i = core->num_parents; 3271 3272 lockdep_assert_held(&prepare_lock); 3273 3274 kfree(core->parents); 3275 while (--i >= 0) 3276 kfree_const(core->parent_names[i]); 3277 3278 kfree(core->parent_names); 3279 kfree_const(core->name); 3280 kfree(core); 3281 } 3282 3283 /* 3284 * Empty clk_ops for unregistered clocks. These are used temporarily 3285 * after clk_unregister() was called on a clock and until the last clock 3286 * consumer calls clk_put() and the struct clk object is freed. 3287 */ 3288 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 3289 { 3290 return -ENXIO; 3291 } 3292 3293 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 3294 { 3295 WARN_ON_ONCE(1); 3296 } 3297 3298 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 3299 unsigned long parent_rate) 3300 { 3301 return -ENXIO; 3302 } 3303 3304 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 3305 { 3306 return -ENXIO; 3307 } 3308 3309 static const struct clk_ops clk_nodrv_ops = { 3310 .enable = clk_nodrv_prepare_enable, 3311 .disable = clk_nodrv_disable_unprepare, 3312 .prepare = clk_nodrv_prepare_enable, 3313 .unprepare = clk_nodrv_disable_unprepare, 3314 .set_rate = clk_nodrv_set_rate, 3315 .set_parent = clk_nodrv_set_parent, 3316 }; 3317 3318 /** 3319 * clk_unregister - unregister a currently registered clock 3320 * @clk: clock to unregister 3321 */ 3322 void clk_unregister(struct clk *clk) 3323 { 3324 unsigned long flags; 3325 3326 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3327 return; 3328 3329 clk_debug_unregister(clk->core); 3330 3331 clk_prepare_lock(); 3332 3333 if (clk->core->ops == &clk_nodrv_ops) { 3334 pr_err("%s: unregistered clock: %s\n", __func__, 3335 clk->core->name); 3336 goto unlock; 3337 } 3338 /* 3339 * Assign empty clock ops for consumers that might still hold 3340 * a reference to this clock.
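 * Any call through these ops will then fail with -ENXIO or trigger a
 * WARN (see clk_nodrv_ops above).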
3341 */ 3342 flags = clk_enable_lock(); 3343 clk->core->ops = &clk_nodrv_ops; 3344 clk_enable_unlock(flags); 3345 3346 if (!hlist_empty(&clk->core->children)) { 3347 struct clk_core *child; 3348 struct hlist_node *t; 3349 3350 /* Reparent all children to the orphan list. */ 3351 hlist_for_each_entry_safe(child, t, &clk->core->children, 3352 child_node) 3353 clk_core_set_parent_nolock(child, NULL); 3354 } 3355 3356 hlist_del_init(&clk->core->child_node); 3357 3358 if (clk->core->prepare_count) 3359 pr_warn("%s: unregistering prepared clock: %s\n", 3360 __func__, clk->core->name); 3361 3362 if (clk->core->protect_count) 3363 pr_warn("%s: unregistering protected clock: %s\n", 3364 __func__, clk->core->name); 3365 3366 kref_put(&clk->core->ref, __clk_release); 3367 unlock: 3368 clk_prepare_unlock(); 3369 } 3370 EXPORT_SYMBOL_GPL(clk_unregister); 3371 3372 /** 3373 * clk_hw_unregister - unregister a currently registered clk_hw 3374 * @hw: hardware-specific clock data to unregister 3375 */ 3376 void clk_hw_unregister(struct clk_hw *hw) 3377 { 3378 clk_unregister(hw->clk); 3379 } 3380 EXPORT_SYMBOL_GPL(clk_hw_unregister); 3381 3382 static void devm_clk_release(struct device *dev, void *res) 3383 { 3384 clk_unregister(*(struct clk **)res); 3385 } 3386 3387 static void devm_clk_hw_release(struct device *dev, void *res) 3388 { 3389 clk_hw_unregister(*(struct clk_hw **)res); 3390 } 3391 3392 /** 3393 * devm_clk_register - resource managed clk_register() 3394 * @dev: device that is registering this clock 3395 * @hw: link to hardware-specific clock data 3396 * 3397 * Managed clk_register(). Clocks returned from this function are 3398 * automatically clk_unregister()ed on driver detach. See clk_register() for 3399 * more information. 3400 */ 3401 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 3402 { 3403 struct clk *clk; 3404 struct clk **clkp; 3405 3406 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 3407 if (!clkp) 3408 return ERR_PTR(-ENOMEM); 3409 3410 clk = clk_register(dev, hw); 3411 if (!IS_ERR(clk)) { 3412 *clkp = clk; 3413 devres_add(dev, clkp); 3414 } else { 3415 devres_free(clkp); 3416 } 3417 3418 return clk; 3419 } 3420 EXPORT_SYMBOL_GPL(devm_clk_register); 3421 3422 /** 3423 * devm_clk_hw_register - resource managed clk_hw_register() 3424 * @dev: device that is registering this clock 3425 * @hw: link to hardware-specific clock data 3426 * 3427 * Managed clk_hw_register(). Clocks registered by this function are 3428 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 3429 * for more information. 
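 *
 * A provider's probe() typically uses it like this (sketch; the foo
 * driver, its clk_hw field and init data are hypothetical, not from
 * this file):
 *
 *	foo->hw.init = &foo_init_data;
 *	ret = devm_clk_hw_register(&pdev->dev, &foo->hw);
 *	if (ret)
 *		return ret;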
3430 */ 3431 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 3432 { 3433 struct clk_hw **hwp; 3434 int ret; 3435 3436 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); 3437 if (!hwp) 3438 return -ENOMEM; 3439 3440 ret = clk_hw_register(dev, hw); 3441 if (!ret) { 3442 *hwp = hw; 3443 devres_add(dev, hwp); 3444 } else { 3445 devres_free(hwp); 3446 } 3447 3448 return ret; 3449 } 3450 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 3451 3452 static int devm_clk_match(struct device *dev, void *res, void *data) 3453 { 3454 struct clk *c = res; 3455 if (WARN_ON(!c)) 3456 return 0; 3457 return c == data; 3458 } 3459 3460 static int devm_clk_hw_match(struct device *dev, void *res, void *data) 3461 { 3462 struct clk_hw *hw = res; 3463 3464 if (WARN_ON(!hw)) 3465 return 0; 3466 return hw == data; 3467 } 3468 3469 /** 3470 * devm_clk_unregister - resource managed clk_unregister() * @dev: device that is unregistering the clock 3471 * @clk: clock to unregister 3472 * 3473 * Deallocate a clock allocated with devm_clk_register(). Normally 3474 * this function will not need to be called and the resource management 3475 * code will ensure that the resource is freed. 3476 */ 3477 void devm_clk_unregister(struct device *dev, struct clk *clk) 3478 { 3479 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 3480 } 3481 EXPORT_SYMBOL_GPL(devm_clk_unregister); 3482 3483 /** 3484 * devm_clk_hw_unregister - resource managed clk_hw_unregister() 3485 * @dev: device that is unregistering the hardware-specific clock data 3486 * @hw: link to hardware-specific clock data 3487 * 3488 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally 3489 * this function will not need to be called and the resource management 3490 * code will ensure that the resource is freed. 3491 */ 3492 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) 3493 { 3494 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, 3495 hw)); 3496 } 3497 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); 3498 3499 /* 3500 * clkdev helpers 3501 */ 3502 int __clk_get(struct clk *clk) 3503 { 3504 struct clk_core *core = !clk ?
NULL : clk->core; 3505 3506 if (core) { 3507 if (!try_module_get(core->owner)) 3508 return 0; 3509 3510 kref_get(&core->ref); 3511 } 3512 return 1; 3513 } 3514 3515 /* keep in sync with __clk_free_clk */ 3516 void __clk_put(struct clk *clk) 3517 { 3518 struct module *owner; 3519 3520 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3521 return; 3522 3523 clk_prepare_lock(); 3524 3525 /* 3526 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 3527 * given user should be balanced with calls to clk_rate_exclusive_put() 3528 * by that same consumer. 3529 */ 3530 if (WARN_ON(clk->exclusive_count)) { 3531 /* We voiced our concern, let's sanitize the situation */ 3532 clk->core->protect_count -= (clk->exclusive_count - 1); 3533 clk_core_rate_unprotect(clk->core); 3534 clk->exclusive_count = 0; 3535 } 3536 3537 hlist_del(&clk->clks_node); 3538 if (clk->min_rate > clk->core->req_rate || 3539 clk->max_rate < clk->core->req_rate) 3540 clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 3541 3542 owner = clk->core->owner; 3543 kref_put(&clk->core->ref, __clk_release); 3544 3545 clk_prepare_unlock(); 3546 3547 module_put(owner); 3548 3549 kfree_const(clk->con_id); 3550 kfree(clk); 3551 } 3552 3553 /*** clk rate change notifiers ***/ 3554 3555 /** 3556 * clk_notifier_register - add a clk rate change notifier 3557 * @clk: struct clk * to watch 3558 * @nb: struct notifier_block * with callback info 3559 * 3560 * Request notification when clk's rate changes. This uses an SRCU 3561 * notifier because we want it to block and notifier unregistrations are 3562 * uncommon. The callbacks associated with the notifier must not 3563 * re-enter the clk framework by calling any top-level clk APIs; 3564 * doing so would cause a nested acquisition of the prepare_lock mutex. 3565 * 3566 * In all notification cases (pre, post and abort rate change) the original 3567 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 3568 * and the new frequency is passed via struct clk_notifier_data.new_rate. 3569 * 3570 * clk_notifier_register() must be called from non-atomic context. 3571 * Returns -EINVAL if called with null arguments, -ENOMEM upon 3572 * allocation failure; otherwise, passes along the return value of 3573 * srcu_notifier_chain_register(). 3574 */ 3575 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 3576 { 3577 struct clk_notifier *cn; 3578 int ret = -ENOMEM; 3579 3580 if (!clk || !nb) 3581 return -EINVAL; 3582 3583 clk_prepare_lock(); 3584 3585 /* search the list of notifiers for this clk */ 3586 list_for_each_entry(cn, &clk_notifier_list, node) 3587 if (cn->clk == clk) 3588 break; 3589 3590 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 3591 if (cn->clk != clk) { 3592 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 3593 if (!cn) 3594 goto out; 3595 3596 cn->clk = clk; 3597 srcu_init_notifier_head(&cn->notifier_head); 3598 3599 list_add(&cn->node, &clk_notifier_list); 3600 } 3601 3602 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 3603 3604 clk->core->notifier_count++; 3605 3606 out: 3607 clk_prepare_unlock(); 3608 3609 return ret; 3610 } 3611 EXPORT_SYMBOL_GPL(clk_notifier_register); 3612 3613 /** 3614 * clk_notifier_unregister - remove a clk rate change notifier 3615 * @clk: struct clk * 3616 * @nb: struct notifier_block * with callback info 3617 * 3618 * Requests no further notification of changes to 'clk' and frees the memory 3619 * allocated in clk_notifier_register.
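 *
 * Calls here should balance an earlier clk_notifier_register(), e.g.
 * (hypothetical notifier block, not from this file):
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 *	...
 *	clk_notifier_unregister(clk, &foo_nb);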
3620 * 3621 * Returns -EINVAL if called with null arguments; otherwise, passes 3622 * along the return value of srcu_notifier_chain_unregister(). 3623 */ 3624 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 3625 { 3626 struct clk_notifier *cn = NULL; 3627 int ret = -EINVAL; 3628 3629 if (!clk || !nb) 3630 return -EINVAL; 3631 3632 clk_prepare_lock(); 3633 3634 list_for_each_entry(cn, &clk_notifier_list, node) 3635 if (cn->clk == clk) 3636 break; 3637 3638 if (cn->clk == clk) { 3639 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 3640 3641 clk->core->notifier_count--; 3642 3643 /* XXX the notifier code should handle this better */ 3644 if (!cn->notifier_head.head) { 3645 srcu_cleanup_notifier_head(&cn->notifier_head); 3646 list_del(&cn->node); 3647 kfree(cn); 3648 } 3649 3650 } else { 3651 ret = -ENOENT; 3652 } 3653 3654 clk_prepare_unlock(); 3655 3656 return ret; 3657 } 3658 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 3659 3660 #ifdef CONFIG_OF 3661 /** 3662 * struct of_clk_provider - Clock provider registration structure 3663 * @link: Entry in global list of clock providers 3664 * @node: Pointer to device tree node of clock provider 3665 * @get: Get clock callback. Returns NULL or a struct clk for the 3666 * given clock specifier * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a 3667 * struct clk_hw for the given clock specifier * @data: context pointer to be passed into @get callback 3668 */ 3669 struct of_clk_provider { 3670 struct list_head link; 3671 3672 struct device_node *node; 3673 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 3674 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 3675 void *data; 3676 }; 3677 3678 static const struct of_device_id __clk_of_table_sentinel 3679 __used __section(__clk_of_table_end); 3680 3681 static LIST_HEAD(of_clk_providers); 3682 static DEFINE_MUTEX(of_clk_mutex); 3683 3684 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 3685 void *data) 3686 { 3687 return data; 3688 } 3689 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 3690 3691 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 3692 { 3693 return data; 3694 } 3695 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 3696 3697 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 3698 { 3699 struct clk_onecell_data *clk_data = data; 3700 unsigned int idx = clkspec->args[0]; 3701 3702 if (idx >= clk_data->clk_num) { 3703 pr_err("%s: invalid clock index %u\n", __func__, idx); 3704 return ERR_PTR(-EINVAL); 3705 } 3706 3707 return clk_data->clks[idx]; 3708 } 3709 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 3710 3711 struct clk_hw * 3712 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 3713 { 3714 struct clk_hw_onecell_data *hw_data = data; 3715 unsigned int idx = clkspec->args[0]; 3716 3717 if (idx >= hw_data->num) { 3718 pr_err("%s: invalid index %u\n", __func__, idx); 3719 return ERR_PTR(-EINVAL); 3720 } 3721 3722 return hw_data->hws[idx]; 3723 } 3724 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 3725 3726 /** 3727 * of_clk_add_provider() - Register a clock provider for a node 3728 * @np: Device node pointer associated with clock provider 3729 * @clk_src_get: callback for decoding clock 3730 * @data: context pointer for @clk_src_get callback.
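 *
 * A provider exporting a fixed array of clocks can pass the generic
 * onecell getter defined above (sketch; clk_data is assumed to be a
 * populated struct clk_onecell_data, not from this file):
 *
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);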
3731 */ 3732 int of_clk_add_provider(struct device_node *np, 3733 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 3734 void *data), 3735 void *data) 3736 { 3737 struct of_clk_provider *cp; 3738 int ret; 3739 3740 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3741 if (!cp) 3742 return -ENOMEM; 3743 3744 cp->node = of_node_get(np); 3745 cp->data = data; 3746 cp->get = clk_src_get; 3747 3748 mutex_lock(&of_clk_mutex); 3749 list_add(&cp->link, &of_clk_providers); 3750 mutex_unlock(&of_clk_mutex); 3751 pr_debug("Added clock from %pOF\n", np); 3752 3753 ret = of_clk_set_defaults(np, true); 3754 if (ret < 0) 3755 of_clk_del_provider(np); 3756 3757 return ret; 3758 } 3759 EXPORT_SYMBOL_GPL(of_clk_add_provider); 3760 3761 /** 3762 * of_clk_add_hw_provider() - Register a clock provider for a node 3763 * @np: Device node pointer associated with clock provider 3764 * @get: callback for decoding clk_hw 3765 * @data: context pointer for @get callback. 3766 */ 3767 int of_clk_add_hw_provider(struct device_node *np, 3768 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3769 void *data), 3770 void *data) 3771 { 3772 struct of_clk_provider *cp; 3773 int ret; 3774 3775 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 3776 if (!cp) 3777 return -ENOMEM; 3778 3779 cp->node = of_node_get(np); 3780 cp->data = data; 3781 cp->get_hw = get; 3782 3783 mutex_lock(&of_clk_mutex); 3784 list_add(&cp->link, &of_clk_providers); 3785 mutex_unlock(&of_clk_mutex); 3786 pr_debug("Added clk_hw provider from %pOF\n", np); 3787 3788 ret = of_clk_set_defaults(np, true); 3789 if (ret < 0) 3790 of_clk_del_provider(np); 3791 3792 return ret; 3793 } 3794 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 3795 3796 static void devm_of_clk_release_provider(struct device *dev, void *res) 3797 { 3798 of_clk_del_provider(*(struct device_node **)res); 3799 } 3800 3801 int devm_of_clk_add_hw_provider(struct device *dev, 3802 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 3803 void *data), 3804 void *data) 3805 { 3806 struct device_node **ptr, *np; 3807 int ret; 3808 3809 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 3810 GFP_KERNEL); 3811 if (!ptr) 3812 return -ENOMEM; 3813 3814 np = dev->of_node; 3815 ret = of_clk_add_hw_provider(np, get, data); 3816 if (!ret) { 3817 *ptr = np; 3818 devres_add(dev, ptr); 3819 } else { 3820 devres_free(ptr); 3821 } 3822 3823 return ret; 3824 } 3825 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 3826 3827 /** 3828 * of_clk_del_provider() - Remove a previously registered clock provider 3829 * @np: Device node pointer associated with clock provider 3830 */ 3831 void of_clk_del_provider(struct device_node *np) 3832 { 3833 struct of_clk_provider *cp; 3834 3835 mutex_lock(&of_clk_mutex); 3836 list_for_each_entry(cp, &of_clk_providers, link) { 3837 if (cp->node == np) { 3838 list_del(&cp->link); 3839 of_node_put(cp->node); 3840 kfree(cp); 3841 break; 3842 } 3843 } 3844 mutex_unlock(&of_clk_mutex); 3845 } 3846 EXPORT_SYMBOL_GPL(of_clk_del_provider); 3847 3848 static int devm_clk_provider_match(struct device *dev, void *res, void *data) 3849 { 3850 struct device_node **np = res; 3851 3852 if (WARN_ON(!np || !*np)) 3853 return 0; 3854 3855 return *np == data; 3856 } 3857 3858 void devm_of_clk_del_provider(struct device *dev) 3859 { 3860 int ret; 3861 3862 ret = devres_release(dev, devm_of_clk_release_provider, 3863 devm_clk_provider_match, dev->of_node); 3864 3865 WARN_ON(ret); 3866 } 3867 EXPORT_SYMBOL(devm_of_clk_del_provider); 3868 3869 static struct clk_hw * 3870 
__of_clk_get_hw_from_provider(struct of_clk_provider *provider, 3871 struct of_phandle_args *clkspec) 3872 { 3873 struct clk *clk; 3874 3875 if (provider->get_hw) 3876 return provider->get_hw(clkspec, provider->data); 3877 3878 clk = provider->get(clkspec, provider->data); 3879 if (IS_ERR(clk)) 3880 return ERR_CAST(clk); 3881 return __clk_get_hw(clk); 3882 } 3883 3884 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, 3885 const char *dev_id, const char *con_id) 3886 { 3887 struct of_clk_provider *provider; 3888 struct clk *clk = ERR_PTR(-EPROBE_DEFER); 3889 struct clk_hw *hw; 3890 3891 if (!clkspec) 3892 return ERR_PTR(-EINVAL); 3893 3894 /* Check if we have such a provider in our array */ 3895 mutex_lock(&of_clk_mutex); 3896 list_for_each_entry(provider, &of_clk_providers, link) { 3897 if (provider->node == clkspec->np) { 3898 hw = __of_clk_get_hw_from_provider(provider, clkspec); 3899 clk = __clk_create_clk(hw, dev_id, con_id); 3900 } 3901 3902 if (!IS_ERR(clk)) { 3903 if (!__clk_get(clk)) { 3904 __clk_free_clk(clk); 3905 clk = ERR_PTR(-ENOENT); 3906 } 3907 3908 break; 3909 } 3910 } 3911 mutex_unlock(&of_clk_mutex); 3912 3913 return clk; 3914 } 3915 3916 /** 3917 * of_clk_get_from_provider() - Lookup a clock from a clock provider 3918 * @clkspec: pointer to a clock specifier data structure 3919 * 3920 * This function looks up a struct clk from the registered list of clock 3921 * providers; the input is a clock specifier data structure as returned 3922 * by the of_parse_phandle_with_args() function call. 3923 */ 3924 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 3925 { 3926 return __of_clk_get_from_provider(clkspec, NULL, __func__); 3927 } 3928 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 3929 3930 /** 3931 * of_clk_get_parent_count() - Count the number of clocks a device node has 3932 * @np: device node to count 3933 * 3934 * Returns: The number of clocks that are possible parents of this node 3935 */ 3936 unsigned int of_clk_get_parent_count(struct device_node *np) 3937 { 3938 int count; 3939 3940 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 3941 if (count < 0) 3942 return 0; 3943 3944 return count; 3945 } 3946 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 3947 3948 const char *of_clk_get_parent_name(struct device_node *np, int index) 3949 { 3950 struct of_phandle_args clkspec; 3951 struct property *prop; 3952 const char *clk_name; 3953 const __be32 *vp; 3954 u32 pv; 3955 int rc; 3956 int count; 3957 struct clk *clk; 3958 3959 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 3960 &clkspec); 3961 if (rc) 3962 return NULL; 3963 3964 index = clkspec.args_count ? clkspec.args[0] : 0; 3965 count = 0; 3966 3967 /* if there is an indices property, use it to transfer the index 3968 * specified into an array offset for the clock-output-names property. 3969 */ 3970 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 3971 if (index == pv) { 3972 index = count; 3973 break; 3974 } 3975 count++; 3976 } 3977 /* We went off the end of 'clock-indices' without finding it */ 3978 if (prop && !vp) 3979 return NULL; 3980 3981 if (of_property_read_string_index(clkspec.np, "clock-output-names", 3982 index, 3983 &clk_name) < 0) { 3984 /* 3985 * Best effort to get the name if the clock has been 3986 * registered with the framework. If the clock isn't 3987 * registered, we return the node name as the name of 3988 * the clock as long as #clock-cells = 0.
3989 */ 3990 clk = of_clk_get_from_provider(&clkspec); 3991 if (IS_ERR(clk)) { 3992 if (clkspec.args_count == 0) 3993 clk_name = clkspec.np->name; 3994 else 3995 clk_name = NULL; 3996 } else { 3997 clk_name = __clk_get_name(clk); 3998 clk_put(clk); 3999 } 4000 } 4001 4002 4003 of_node_put(clkspec.np); 4004 return clk_name; 4005 } 4006 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 4007 4008 /** 4009 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 4010 * number of parents 4011 * @np: Device node pointer associated with clock provider 4012 * @parents: pointer to char array that holds the parents' names 4013 * @size: size of the @parents array 4014 * 4015 * Return: number of parents for the clock node. 4016 */ 4017 int of_clk_parent_fill(struct device_node *np, const char **parents, 4018 unsigned int size) 4019 { 4020 unsigned int i = 0; 4021 4022 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 4023 i++; 4024 4025 return i; 4026 } 4027 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 4028 4029 struct clock_provider { 4030 void (*clk_init_cb)(struct device_node *); 4031 struct device_node *np; 4032 struct list_head node; 4033 }; 4034 4035 /* 4036 * This function looks for a parent clock. If there is one, then it 4037 * checks that the provider for this parent clock was initialized, in 4038 * which case the parent clock will be ready. 4039 */ 4040 static int parent_ready(struct device_node *np) 4041 { 4042 int i = 0; 4043 4044 while (true) { 4045 struct clk *clk = of_clk_get(np, i); 4046 4047 /* this parent is ready, we can check the next one */ 4048 if (!IS_ERR(clk)) { 4049 clk_put(clk); 4050 i++; 4051 continue; 4052 } 4053 4054 /* at least one parent is not ready, we exit now */ 4055 if (PTR_ERR(clk) == -EPROBE_DEFER) 4056 return 0; 4057 4058 /* 4059 * Here we assume that the device tree is 4060 * written correctly, so an error means that there are 4061 * no more parents. As we didn't exit earlier, the 4062 * previous parents are ready. If there are no clock 4063 * parents at all, there is no need to wait for them, so 4064 * we can consider their absence as them being ready. 4065 */ 4066 return 1; 4067 } 4068 } 4069 4070 /** 4071 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 4072 * @np: Device node pointer associated with clock provider 4073 * @index: clock index 4074 * @flags: pointer to top-level framework flags 4075 * 4076 * Detects if the clock-critical property exists and, if so, sets the 4077 * corresponding CLK_IS_CRITICAL flag. 4078 * 4079 * Do not use this function. It exists only for legacy Device Tree 4080 * bindings, such as the one-clock-per-node style, which is outdated. 4081 * Those bindings typically put all clock data into .dts and the Linux 4082 * driver has no clock data, thus making it impossible to set this flag 4083 * correctly from the driver. Only those drivers may call 4084 * of_clk_detect_critical from their setup functions. 4085 * 4086 * Return: error code or zero on success 4087 */ 4088 int of_clk_detect_critical(struct device_node *np, 4089 int index, unsigned long *flags) 4090 { 4091 struct property *prop; 4092 const __be32 *cur; 4093 uint32_t idx; 4094 4095 if (!np || !flags) 4096 return -EINVAL; 4097 4098 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 4099 if (index == idx) 4100 *flags |= CLK_IS_CRITICAL; 4101 4102 return 0; 4103 } 4104 4105 /** 4106 * of_clk_init() - Scan and init clock providers from the DT 4107 * @matches: array of compatible values and init functions for providers.
4108 * 4109 * This function scans the device tree for matching clock providers 4110 * and calls their initialization functions, trying to follow the 4111 * dependencies between providers. 4112 */ 4113 void __init of_clk_init(const struct of_device_id *matches) 4114 { 4115 const struct of_device_id *match; 4116 struct device_node *np; 4117 struct clock_provider *clk_provider, *next; 4118 bool is_init_done; 4119 bool force = false; 4120 LIST_HEAD(clk_provider_list); 4121 4122 if (!matches) 4123 matches = &__clk_of_table; 4124 4125 /* First prepare the list of the clock providers */ 4126 for_each_matching_node_and_match(np, matches, &match) { 4127 struct clock_provider *parent; 4128 4129 if (!of_device_is_available(np)) 4130 continue; 4131 4132 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 4133 if (!parent) { 4134 list_for_each_entry_safe(clk_provider, next, 4135 &clk_provider_list, node) { 4136 list_del(&clk_provider->node); 4137 of_node_put(clk_provider->np); 4138 kfree(clk_provider); 4139 } 4140 of_node_put(np); 4141 return; 4142 } 4143 4144 parent->clk_init_cb = match->data; 4145 parent->np = of_node_get(np); 4146 list_add_tail(&parent->node, &clk_provider_list); 4147 } 4148 4149 while (!list_empty(&clk_provider_list)) { 4150 is_init_done = false; 4151 list_for_each_entry_safe(clk_provider, next, 4152 &clk_provider_list, node) { 4153 if (force || parent_ready(clk_provider->np)) { 4154 4155 /* Don't populate platform devices */ 4156 of_node_set_flag(clk_provider->np, 4157 OF_POPULATED); 4158 4159 clk_provider->clk_init_cb(clk_provider->np); 4160 of_clk_set_defaults(clk_provider->np, true); 4161 4162 list_del(&clk_provider->node); 4163 of_node_put(clk_provider->np); 4164 kfree(clk_provider); 4165 is_init_done = true; 4166 } 4167 } 4168 4169 /* 4170 * We didn't manage to initialize any of the 4171 * remaining providers during the last loop, so now we 4172 * initialize all the remaining ones unconditionally 4173 * in case the clock parents were not mandatory. 4174 */ 4175 if (!is_init_done) 4176 force = true; 4177 } 4178 } 4179 #endif 4180