// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}
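/*
 * The prepare lock is reentrant for its owner: a task already holding it
 * may call back into the framework without deadlocking. A minimal sketch
 * of the pattern this enables, assuming a hypothetical provider callback
 * foo_prepare() (illustrative only, not part of this file):
 *
 *	static int foo_prepare(struct clk_hw *hw)
 *	{
 *		// Runs under prepare_lock; clk_get_rate() below re-enters
 *		// clk_prepare_lock(), which only bumps prepare_refcnt.
 *		unsigned long parent_rate =
 *			clk_get_rate(clk_get_parent(hw->clk));
 *
 *		return foo_program_pll(hw, parent_rate);
 *	}
 */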
static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}
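/*
 * A typical .is_enabled implementation just reads a gate bit, which is
 * only safe while the controller's register bus is powered; hence the
 * pm_runtime_active() check above. Hedged sketch (the foo_gate type and
 * its fields are hypothetical):
 *
 *	static int foo_gate_is_enabled(struct clk_hw *hw)
 *	{
 *		struct foo_gate *gate = container_of(hw, struct foo_gate, hw);
 *
 *		return !!(readl(gate->reg) & BIT(gate->bit_idx));
 *	}
 */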
/***        helper functions       ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}
unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
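/*
 * Worked example (illustrative): with parents at 100 MHz and 150 MHz and
 * a 120 MHz request, the default policy above ("fastest rate <= rate")
 * selects the 100 MHz parent, while CLK_MUX_ROUND_CLOSEST would select
 * 150 MHz. A simple mux provider can use these helpers directly, e.g.:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * (the foo_mux_* callbacks are hypothetical.)
 */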
struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}
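/*
 * clk_core_rate_nuke_protect() pairs with clk_core_rate_restore_protect()
 * (defined further down): the former returns the saved protect count so
 * the latter can reinstate it. The pattern, as used later in
 * clk_core_req_round_rate_nolock():
 *
 *	cnt = clk_core_rate_nuke_protect(core);
 *	if (cnt < 0)
 *		return cnt;
 *	// ... operate on the temporarily unprotected clock ...
 *	clk_core_rate_restore_protect(core, cnt);
 */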
/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, or a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
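/*
 * Example consumer usage of the exclusive-rate API (illustrative; the
 * device and "pll" connection id are hypothetical):
 *
 *	struct clk *clk = devm_clk_get(dev, "pll");
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	// ... no other consumer can change or glitch the rate here ...
 *	clk_rate_exclusive_put(clk);
 */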
static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. For this reason clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection.
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * For this reason clk_prepare and clk_enable are not mutually exclusive. In
 * fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, or a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
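/*
 * The canonical consumer sequence pairs the sleepable and atomic halves
 * in order, per the documentation above (illustrative):
 *
 *	ret = clk_prepare(clk);		// may sleep (PLL lock, I2C access, ...)
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);		// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	// ... clock is running ...
 *	clk_disable(clk);		// atomic half first
 *	clk_unprepare(clk);		// then the sleepable half
 */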
static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. For
 * this reason clk_unprepare and clk_disable are not mutually exclusive. In
 * fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables the gate
 * clocks based on the enable_count. This is done in cases where the clock
 * context is lost and, depending on the enable_count, the clock either
 * needs to be enabled or disabled. This helps restore the state of gate
 * clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}
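/*
 * A platform would typically invoke the two public wrappers below from a
 * late suspend/early resume hook, e.g. (illustrative; the foo_* names and
 * the choice of noirq callbacks are hypothetical):
 *
 *	static int foo_pm_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static int foo_pm_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();
 *		return 0;
 *	}
 */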
/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock registers for power states in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. For this reason clk_enable
 * and clk_prepare are not mutually exclusive. In fact clk_prepare must be
 * called before clk_enable. Returns 0 on success, or a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}
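/*
 * The two helpers above mirror the public clk_prepare_enable() and
 * clk_disable_unprepare() consumer helpers from <linux/clk.h>, which
 * combine both halves when the caller can sleep (illustrative):
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret)
 *		return ret;
 *	// ...
 *	clk_disable_unprepare(clk);
 */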
static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled:
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	if (core->ops->determine_rate || core->ops->round_rate)
		return true;

	return false;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}
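/*
 * Providers that cannot implement .determine_rate may supply the legacy
 * .round_rate instead; clk_core_determine_round_nolock() above falls back
 * to it. Hedged sketch for a power-of-two divider (foo_* names are
 * hypothetical):
 *
 *	static long foo_div_round_rate(struct clk_hw *hw, unsigned long rate,
 *				       unsigned long *parent_rate)
 *	{
 *		unsigned long div = DIV_ROUND_UP(*parent_rate, max(rate, 1UL));
 *
 *		return *parent_rate / roundup_pow_of_two(div);
 *	}
 */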
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
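/*
 * Example: a consumer can probe what clk_set_rate() would deliver without
 * changing any hardware state (illustrative):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && rounded == 48000000)
 *		ret = clk_set_rate(clk, rounded);
 */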
/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, in which case the accuracy is
 * recalculated first.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}
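/*
 * .recalc_rate derives a clock's rate from the parent rate handed in by
 * clk_recalc() above. Hedged fixed-factor sketch (the foo_fixed type and
 * its fields are hypothetical):
 *
 *	static unsigned long foo_recalc_rate(struct clk_hw *hw,
 *					     unsigned long parent_rate)
 *	{
 *		struct foo_fixed *fix = container_of(hw, struct foo_fixed, hw);
 *
 *		return parent_rate / fix->div * fix->mult;
 *	}
 */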
/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case the rate is recalculated first.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)
			return i;

		if (core->parents[i])
			continue;

		/* Fallback to comparing globally unique names */
		if (!strcmp(parent->name, core->parent_names[i])) {
			core->parents[i] = parent;
			return i;
		}
	}

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}
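/*
 * The speculation pass below exists to serve rate-change notifiers. A
 * consumer subscribes with clk_notifier_register() from <linux/clk.h>;
 * hedged sketch of a callback that vetoes rates it cannot handle:
 *
 *	static int foo_clk_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
 *			return NOTIFY_BAD;	// aborts the rate change
 *
 *		return NOTIFY_OK;
 *	}
 */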
/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}
static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, or a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
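/*
 * Example consumer call (illustrative): request a new rate and report
 * failure, leaving the old rate in place.
 *
 *	ret = clk_set_rate(clk, 74250000);
 *	if (ret)
 *		dev_warn(dev, "failed to set pixel clock: %d\n", ret);
 */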
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is deliberately omitted here:
	 * this function is meant to be used instead of clk_rate_protect,
	 * so it protects the clock provider before the consumer code path
	 * does.
	 */

	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in a bit of trouble here: the current rate is
		 * outside the requested range. We are going to try to
		 * request an appropriate range boundary, but there is a
		 * catch. It may fail for the usual reasons (clock broken,
		 * clock protected, etc) but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
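 *
 * For instance, a thermal driver might temporarily cap a CPU clock
 * without disturbing other consumers' limits (hypothetical handle,
 * sketch only):
 *
 *	clk_set_max_rate(cpu_clk, 800000000);	(cap at 800 MHz)
 *	...
 *	clk_set_max_rate(cpu_clk, ULONG_MAX);	(lift the cap again)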
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where @parent is already the current parent. */
	if (core->parent == parent_core)
		return true;

	return match_string(core->parent_names, core->num_parents,
			    parent_core->name) >= 0;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example,
 * phase-locked-loop clock signal generators may shift phase with
 * respect to the feedback clock signal input, but in other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	/* Always try to update cached phase if possible */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
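 *
 * A consumer might use this to verify that a shift it requested earlier
 * was actually applied (sketch only, hypothetical names):
 *
 *	if (clk_get_phase(sample_clk) != 90)
 *		dev_warn(dev, "sampling clock phase not applied\n");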
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);

static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation.
 *
 * Returns (0) on success, a negative errno otherwise.
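 *
 * For example, requesting a 25% duty cycle (sketch only, hypothetical
 * handle; the provider must implement .set_duty_cycle or delegate it to
 * a parent):
 *
 *	ret = clk_set_duty_cycle(clk, 1, 4);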
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
	int ret;
	struct clk_duty duty;

	if (!clk)
		return 0;

	/* sanity check the ratio */
	if (den == 0 || num > den)
		return -EINVAL;

	duty.num = num;
	duty.den = den;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);

static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
					  unsigned int scale)
{
	struct clk_duty *duty = &core->duty;
	int ret;

	clk_prepare_lock();

	ret = clk_core_update_duty_cycle_nolock(core);
	if (!ret)
		ret = mult_frac(scale, duty->num, duty->den);

	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio of a clock node multiplied by the provided
 * scaling factor, or negative errno on error.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
{
	if (!clk)
		return 0;

	return clk_core_get_scaled_duty_cycle(clk->core, scale);
}
EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);

/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->protect_count,
		   clk_core_get_rate(c), clk_core_get_accuracy(c),
		   clk_core_get_phase(c),
		   clk_core_get_scaled_duty_cycle(c, 100000));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "                                 enable  prepare  protect                                duty\n");
	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
	seq_puts(s, "---------------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_summary);

static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
	seq_printf(s, "\"duty_cycle\": %u",
		   clk_core_get_scaled_duty_cycle(c, 100000));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_putc(s, ',');
		clk_dump_subtree(s, child, level + 1);
	}

	seq_putc(s, '}');
}

static int clk_dump_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_putc(s, '{');
	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_putc(s, ',');
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_puts(s, "}\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);

static const struct {
	unsigned long flag;
	const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, #f }
	ENTRY(CLK_SET_RATE_GATE),
	ENTRY(CLK_SET_PARENT_GATE),
	ENTRY(CLK_SET_RATE_PARENT),
	ENTRY(CLK_IGNORE_UNUSED),
	ENTRY(CLK_IS_BASIC),
	ENTRY(CLK_GET_RATE_NOCACHE),
	ENTRY(CLK_SET_RATE_NO_REPARENT),
	ENTRY(CLK_GET_ACCURACY_NOCACHE),
	ENTRY(CLK_RECALC_NEW_RATES),
	ENTRY(CLK_SET_RATE_UNGATE),
	ENTRY(CLK_IS_CRITICAL),
	ENTRY(CLK_OPS_PARENT_ENABLE),
	ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};

static int clk_flags_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long flags = core->flags;
	unsigned int i;

	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
		if (flags & clk_flags[i].flag) {
			seq_printf(s, "%s\n", clk_flags[i].name);
			flags &= ~clk_flags[i].flag;
		}
	}
	if (flags) {
		/* Unknown flags */
		seq_printf(s, "0x%lx\n", flags);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_flags);

static int possible_parents_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int i;

	for (i = 0; i < core->num_parents - 1; i++)
		seq_printf(s, "%s ", core->parent_names[i]);

	seq_printf(s, "%s\n", core->parent_names[i]);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);

static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	struct clk_duty *duty = &core->duty;

	seq_printf(s, "%u/%u\n", duty->num, duty->den);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);

static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *root;

	if (!core || !pdentry)
		return;

	root = debugfs_create_dir(core->name, pdentry);
	core->dentry = root;

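	/*
	 * The read-only files created below expose the per-clock state;
	 * with debugfs mounted in the usual place they show up under
	 * /sys/kernel/debug/clk/<name>/ (e.g. clk_rate).
	 */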
	debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
	debugfs_create_file("clk_duty_cycle", 0444, root, core,
			    &clk_duty_cycle_fops);

	if (core->num_parents > 1)
		debugfs_create_file("clk_possible_parents", 0444, root, core,
				    &possible_parents_fops);

	if (core->ops->debug_init)
		core->ops->debug_init(core->hw, core->dentry);
}

/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized. Otherwise it bails out early since the debugfs clk directory
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static void clk_debug_register(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);
	if (inited)
		clk_debug_create_one(core, rootdir);
	mutex_unlock(&clk_debug_lock);
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time; all other
 * clks added dynamically are registered along the way by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;

	rootdir = debugfs_create_dir("clk", NULL);

	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
			    &clk_summary_fops);
	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
			    &clk_dump_fops);
	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
			    &clk_summary_fops);
	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
			    &clk_dump_fops);

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif

/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core: clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_core_init(struct clk_core *core)
{
	int i, ret;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	unsigned long rate;

	if (!core)
		return -EINVAL;

	clk_prepare_lock();

	ret = clk_pm_runtime_get(core);
	if (ret)
		goto unlock;

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
			 __func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
	    !(core->ops->set_parent && core->ops->set_rate)) {
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
		     "%s: invalid NULL in %s's .parent_names\n",
		     __func__, core->name);

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
	 * list. If clk doesn't have any parents then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent) {
		hlist_add_head(&core->child_node,
			       &core->parent->children);
		core->orphan = core->parent->orphan;
	} else if (!core->num_parents) {
		hlist_add_head(&core->child_node, &clk_root_list);
		core->orphan = false;
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		core->orphan = true;
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	/*
	 * Set clk's accuracy. The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy. If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's duty cycle.
	 */
	clk_core_update_duty_cycle_nolock(core);

	/*
	 * Set clk's rate. The preferred method is to use .recalc_rate. For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate. If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
	 * don't get accidentally disabled when walking the orphan tree and
	 * reparenting clocks
	 */
	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		clk_core_prepare(core);

		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	/*
	 * walk the list of orphan clocks and reparent any that newly find a
	 * parent.
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		struct clk_core *parent = __clk_init_parent(orphan);

		/*
		 * We need to use __clk_set_parent_before() and _after() to
		 * properly migrate any prepare/enable count of the orphan
		 * clock. This is important for CLK_IS_CRITICAL clocks, which
		 * are enabled during init but might not have a parent yet.
		 */
		if (parent) {
			/* update the clk tree topology */
			__clk_set_parent_before(orphan, parent);
			__clk_set_parent_after(orphan, parent, NULL);
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
	}

	kref_init(&core->ref);
out:
	clk_pm_runtime_put(core);
unlock:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

/* keep in sync with __clk_put */
void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree_const(clk->con_id);
	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (WARN_ON(!hw->init->ops)) {
		ret = -EINVAL;
		goto fail_ops;
	}
	core->ops = hw->init->ops;

	if (dev && pm_runtime_enabled(dev))
		core->dev = dev;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
				     GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						      GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	/* avoid unnecessary string look-ups of clk_core's possible parents. */
	core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
				GFP_KERNEL);
	if (!core->parents) {
		ret = -ENOMEM;
		goto fail_parents;
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parents;
	}

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parents:
	kfree(core->parents);
fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
fail_ops:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(clk_register(dev, hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
			__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
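 *
 * Typical provider-side usage from a probe routine (sketch only,
 * hypothetical driver state "priv" and init data):
 *
 *	priv->hw.init = &init_data;
 *	ret = devm_clk_hw_register(&pdev->dev, &priv->hw);
 *	if (ret)
 *		return ret;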
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree_const(clk->con_id);
	kfree(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would result in a nested acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees the
 * memory allocated in clk_notifier_register.
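 *
 * A notifier registered from probe() is typically balanced like this
 * (sketch only, hypothetical callback my_rate_notifier):
 *
 *	nb.notifier_call = my_rate_notifier;
 *	clk_notifier_register(clk, &nb);
 *	...
 *	clk_notifier_unregister(clk, &nb);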
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a struct clk_hw
 *          for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
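 *
 * Typical usage for a provider with several outputs (sketch only,
 * assuming a previously filled clk_table array):
 *
 *	onecell.clks = clk_table;
 *	onecell.clk_num = ARRAY_SIZE(clk_table);
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &onecell);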
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

/*
 * We allow a child device to use its parent device as the clock provider node
 * for cases like MFD sub-devices where the child device driver wants to use
 * devm_*() APIs but not list the device in DT as a sub-node.
 */
static struct device_node *get_clk_provider_node(struct device *dev)
{
	struct device_node *np, *parent_np;

	np = dev->of_node;
	parent_np = dev->parent ? dev->parent->of_node : NULL;

	if (!of_find_property(np, "#clock-cells", NULL))
		if (of_find_property(parent_np, "#clock-cells", NULL))
			np = parent_np;

	return np;
}

/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers a clock provider for the given device's node. If the device has
 * no DT node or if the device node lacks clock provider information
 * (#clock-cells), then the parent device's node is scanned for this
 * information. If the parent node has #clock-cells, it is used for the
 * registration. The provider is automatically released at device exit.
 *
 * Return: 0 on success or an errno on failure.
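 *
 * Sketch of provider-side usage (hypothetical hw_data previously
 * populated with clk_hw pointers, NR_CLKS assumed):
 *
 *	hw_data->num = NR_CLKS;
 *	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
 *					  hw_data);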
 */
int devm_of_clk_add_hw_provider(struct device *dev,
			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
					      void *data),
			void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = get_clk_provider_node(dev);
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
void devm_of_clk_del_provider(struct device *dev)
{
	int ret;
	struct device_node *np = get_clk_provider_node(dev);

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, np);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
	struct clk_hw *hw;

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			clk = __clk_create_clk(hw, dev_id, con_id);
		}

		if (!IS_ERR(clk)) {
			if (!__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned by
 * of_parse_phandle_with_args().
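 *
 * For example (sketch only, error handling elided):
 *
 *	of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *				   &clkspec);
 *	clk = of_clk_get_from_provider(&clkspec);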
/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * by of_parse_phandle_with_args().
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Return: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * If there is an indices property, use it to translate the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp) {
		of_node_put(clkspec.np);
		return NULL;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};
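/*
 * Illustrative sketch (not compiled into this file): a hypothetical
 * provider using of_clk_parent_fill() to collect parent names from the
 * node's "clocks" property before registering a mux. The baz_* names and
 * BAZ_MAX_PARENTS are made up; baz_register_mux stands in for whatever
 * registration helper the driver would actually use.
 */
#if 0
#define BAZ_MAX_PARENTS	4

static void baz_mux_setup(struct device_node *np)
{
	const char *parents[BAZ_MAX_PARENTS];
	int num_parents;

	/* parents[] is filled until the array is full or a lookup fails */
	num_parents = of_clk_parent_fill(np, parents, BAZ_MAX_PARENTS);
	if (num_parents < 2)
		return;

	baz_register_mux(np, parents, num_parents);
}
#endif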
/*
 * This function looks for a parent clock. If there is one, it checks
 * that the provider for this parent clock was initialized, in which
 * case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are no
		 * more parents. As we didn't exit earlier, all previous
		 * parents are ready. If the clock has no parents at all,
		 * there is nothing to wait for, so we consider their
		 * absence as being ready as well.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style. Those
 * bindings typically put all clock data into .dts and the Linux driver
 * has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical() from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}
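/*
 * Illustrative sketch (not compiled into this file): how a legacy
 * one-clock-per-node setup function might consult the "clock-critical"
 * property before registering its clock. The qux_* names are made up,
 * and qux_register_gate stands in for the driver's own registration
 * helper; new bindings should not use this pattern.
 */
#if 0
static void qux_clk_setup(struct device_node *np)
{
	unsigned long flags = 0;

	/* Set CLK_IS_CRITICAL in flags if DT marks index 0 as critical */
	if (of_clk_detect_critical(np, 0, &flags))
		return;

	qux_register_gate(np, flags);
}
#endif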
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions, following the inter-provider
 * dependencies where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the remaining
		 * providers during the last loop, so now we initialize all
		 * the remaining ones unconditionally in case the clock
		 * parent was not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif
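/*
 * Illustrative sketch (not compiled into this file): early providers
 * typically land in __clk_of_table via CLK_OF_DECLARE(), so that an
 * of_clk_init(NULL) call from platform startup code picks them up in
 * dependency order. The compatible string and the quux_* names are made
 * up for this example.
 */
#if 0
static void __init quux_clk_init(struct device_node *np)
{
	/* register the clocks and the clk_hw provider for this node here */
}
CLK_OF_DECLARE(quux_clk, "vendor,quux-clk", quux_clk_init);
#endif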