// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/

struct clk_parent_map {
	const struct clk_hw *hw;
	struct clk_core *core;
	const char *fw_name;
	const char *name;
	int index;
};

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct clk_core *parent;
	struct clk_parent_map *parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	bool rpm_enabled;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct clk_duty duty;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(core->dev);
		return ret;
	}
	return 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***  helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *      parent: clock-controller@f00abcd {
 *              reg = <0xf00abcd 0xabcd>;
 *              #clock-cells = <0>;
 *      };
 *
 *      clock-controller@c001 {
 *              reg = <0xc001 0xf00d>;
 *              clocks = <&parent>;
 *              clock-names = "xtal";
 *              #clock-cells = <1>;
 *      };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}
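/*
 * Consumer-side illustration (not part of this file): a minimal, hedged sketch
 * of the clk_rate_exclusive_get()/clk_rate_exclusive_put() pattern documented
 * above. The device pointer, connection name and rate are made-up
 * placeholders.
 *
 *	struct clk *clk = devm_clk_get(dev, "baud");	// hypothetical consumer clk
 *
 *	ret = clk_rate_exclusive_get(clk);	// lock out other consumers
 *	if (ret)
 *		return ret;
 *	ret = clk_set_rate(clk, 48000000);	// rate stays glitch-free for us
 *	...
 *	clk_rate_exclusive_put(clk);		// balance the get
 */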
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.
 * In a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}
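/*
 * Illustration only: a hedged sketch of how platform suspend/resume code might
 * call clk_save_context()/clk_restore_context() (defined just below). The hook
 * names are hypothetical; only the two clk API calls come from this file.
 *
 *	static int example_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();	// snapshot clock registers
 *	}
 *
 *	static int example_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();		// replay them after power loss
 *		return 0;
 *	}
 */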
/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}
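/*
 * Consumer-side illustration (not part of this file): a hedged sketch of the
 * prepare/enable ordering described in the clk_prepare() and clk_enable()
 * kernel-doc above. The connection name is a made-up placeholder.
 *
 *	clk = devm_clk_get(dev, "func");	// hypothetical consumer clk
 *	ret = clk_prepare(clk);			// slow part, may sleep
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);			// fast part, atomic-safe
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);			// disable before unprepare
 *	clk_unprepare(clk);
 */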
static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}
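/*
 * Consumer-side illustration (not part of this file): a hedged sketch of how a
 * driver might subscribe to the rate-change notifications delivered through
 * __clk_notify() above, using the public clk_notifier_register() API. The
 * callback, rate threshold and variable names are made-up placeholders.
 *
 *	static int example_clk_notifier_cb(struct notifier_block *nb,
 *					   unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
 *			return NOTIFY_BAD;	// veto the proposed change
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_clk_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(clk, &example_nb);
 */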
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
2108 * for certain clock types. 2109 */ 2110 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { 2111 /* Skip children who will be reparented to another clock */ 2112 if (child->new_parent && child->new_parent != core) 2113 continue; 2114 clk_change_rate(child); 2115 } 2116 2117 /* handle the new child who might not be in core->children yet */ 2118 if (core->new_child) 2119 clk_change_rate(core->new_child); 2120 2121 clk_pm_runtime_put(core); 2122 } 2123 2124 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, 2125 unsigned long req_rate) 2126 { 2127 int ret, cnt; 2128 struct clk_rate_request req; 2129 2130 lockdep_assert_held(&prepare_lock); 2131 2132 if (!core) 2133 return 0; 2134 2135 /* simulate what the rate would be if it could be freely set */ 2136 cnt = clk_core_rate_nuke_protect(core); 2137 if (cnt < 0) 2138 return cnt; 2139 2140 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); 2141 req.rate = req_rate; 2142 2143 ret = clk_core_round_rate_nolock(core, &req); 2144 2145 /* restore the protection */ 2146 clk_core_rate_restore_protect(core, cnt); 2147 2148 return ret ? 0 : req.rate; 2149 } 2150 2151 static int clk_core_set_rate_nolock(struct clk_core *core, 2152 unsigned long req_rate) 2153 { 2154 struct clk_core *top, *fail_clk; 2155 unsigned long rate; 2156 int ret = 0; 2157 2158 if (!core) 2159 return 0; 2160 2161 rate = clk_core_req_round_rate_nolock(core, req_rate); 2162 2163 /* bail early if nothing to do */ 2164 if (rate == clk_core_get_rate_nolock(core)) 2165 return 0; 2166 2167 /* fail on a direct rate set of a protected provider */ 2168 if (clk_core_rate_is_protected(core)) 2169 return -EBUSY; 2170 2171 /* calculate new rates and get the topmost changed clock */ 2172 top = clk_calc_new_rates(core, req_rate); 2173 if (!top) 2174 return -EINVAL; 2175 2176 ret = clk_pm_runtime_get(core); 2177 if (ret) 2178 return ret; 2179 2180 /* notify that we are about to change rates */ 2181 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 2182 if (fail_clk) { 2183 pr_debug("%s: failed to set %s rate\n", __func__, 2184 fail_clk->name); 2185 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 2186 ret = -EBUSY; 2187 goto err; 2188 } 2189 2190 /* change the rates */ 2191 clk_change_rate(top); 2192 2193 core->req_rate = req_rate; 2194 err: 2195 clk_pm_runtime_put(core); 2196 2197 return ret; 2198 } 2199 2200 /** 2201 * clk_set_rate - specify a new rate for clk 2202 * @clk: the clk whose rate is being changed 2203 * @rate: the new rate for clk 2204 * 2205 * In the simplest case clk_set_rate will only adjust the rate of clk. 2206 * 2207 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 2208 * propagate up to clk's parent; whether or not this happens depends on the 2209 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 2210 * after calling .round_rate then upstream parent propagation is ignored. If 2211 * *parent_rate comes back with a new rate for clk's parent then we propagate 2212 * up to clk's parent and set its rate. Upward propagation will continue 2213 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 2214 * .round_rate stops requesting changes to clk's parent_rate. 2215 * 2216 * Rate changes are accomplished via tree traversal that also recalculates the 2217 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 2218 * 2219 * Returns 0 on success, -EERROR otherwise. 
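 *
 * Illustrative consumer usage (the device pointer and the "baud" clock name
 * are hypothetical, not taken from this file):
 *
 *	struct clk *clk = devm_clk_get(dev, "baud");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_set_rate(clk, 48000000);
 *	if (ret)
 *		return ret;
 *	ret = clk_prepare_enable(clk);
 *
 * The rate actually programmed may differ from the one requested because of
 * hardware granularity; clk_get_rate() reports the resulting rate.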
2220 */ 2221 int clk_set_rate(struct clk *clk, unsigned long rate) 2222 { 2223 int ret; 2224 2225 if (!clk) 2226 return 0; 2227 2228 /* prevent racing with updates to the clock topology */ 2229 clk_prepare_lock(); 2230 2231 if (clk->exclusive_count) 2232 clk_core_rate_unprotect(clk->core); 2233 2234 ret = clk_core_set_rate_nolock(clk->core, rate); 2235 2236 if (clk->exclusive_count) 2237 clk_core_rate_protect(clk->core); 2238 2239 clk_prepare_unlock(); 2240 2241 return ret; 2242 } 2243 EXPORT_SYMBOL_GPL(clk_set_rate); 2244 2245 /** 2246 * clk_set_rate_exclusive - specify a new rate and get exclusive control 2247 * @clk: the clk whose rate is being changed 2248 * @rate: the new rate for clk 2249 * 2250 * This is a combination of clk_set_rate() and clk_rate_exclusive_get() 2251 * within a critical section 2252 * 2253 * This can be used initially to ensure that at least 1 consumer is 2254 * satisfied when several consumers are competing for exclusivity over the 2255 * same clock provider. 2256 * 2257 * The exclusivity is not applied if setting the rate failed. 2258 * 2259 * Calls to clk_rate_exclusive_get() should be balanced with calls to 2260 * clk_rate_exclusive_put(). 2261 * 2262 * Returns 0 on success, -EERROR otherwise. 2263 */ 2264 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) 2265 { 2266 int ret; 2267 2268 if (!clk) 2269 return 0; 2270 2271 /* prevent racing with updates to the clock topology */ 2272 clk_prepare_lock(); 2273 2274 /* 2275 * The temporary protection removal is not here, on purpose 2276 * This function is meant to be used instead of clk_rate_protect, 2277 * so before the consumer code path protect the clock provider 2278 */ 2279 2280 ret = clk_core_set_rate_nolock(clk->core, rate); 2281 if (!ret) { 2282 clk_core_rate_protect(clk->core); 2283 clk->exclusive_count++; 2284 } 2285 2286 clk_prepare_unlock(); 2287 2288 return ret; 2289 } 2290 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); 2291 2292 /** 2293 * clk_set_rate_range - set a rate range for a clock source 2294 * @clk: clock source 2295 * @min: desired minimum clock rate in Hz, inclusive 2296 * @max: desired maximum clock rate in Hz, inclusive 2297 * 2298 * Returns success (0) or negative errno. 2299 */ 2300 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) 2301 { 2302 int ret = 0; 2303 unsigned long old_min, old_max, rate; 2304 2305 if (!clk) 2306 return 0; 2307 2308 if (min > max) { 2309 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", 2310 __func__, clk->core->name, clk->dev_id, clk->con_id, 2311 min, max); 2312 return -EINVAL; 2313 } 2314 2315 clk_prepare_lock(); 2316 2317 if (clk->exclusive_count) 2318 clk_core_rate_unprotect(clk->core); 2319 2320 /* Save the current values in case we need to rollback the change */ 2321 old_min = clk->min_rate; 2322 old_max = clk->max_rate; 2323 clk->min_rate = min; 2324 clk->max_rate = max; 2325 2326 rate = clk_core_get_rate_nolock(clk->core); 2327 if (rate < min || rate > max) { 2328 /* 2329 * FIXME: 2330 * We are in bit of trouble here, current rate is outside the 2331 * the requested range. We are going try to request appropriate 2332 * range boundary but there is a catch. 
It may fail for the 2333 * usual reason (clock broken, clock protected, etc) but also 2334 * because: 2335 * - round_rate() was not favorable and fell on the wrong 2336 * side of the boundary 2337 * - the determine_rate() callback does not really check for 2338 * this corner case when determining the rate 2339 */ 2340 2341 if (rate < min) 2342 rate = min; 2343 else 2344 rate = max; 2345 2346 ret = clk_core_set_rate_nolock(clk->core, rate); 2347 if (ret) { 2348 /* rollback the changes */ 2349 clk->min_rate = old_min; 2350 clk->max_rate = old_max; 2351 } 2352 } 2353 2354 if (clk->exclusive_count) 2355 clk_core_rate_protect(clk->core); 2356 2357 clk_prepare_unlock(); 2358 2359 return ret; 2360 } 2361 EXPORT_SYMBOL_GPL(clk_set_rate_range); 2362 2363 /** 2364 * clk_set_min_rate - set a minimum clock rate for a clock source 2365 * @clk: clock source 2366 * @rate: desired minimum clock rate in Hz, inclusive 2367 * 2368 * Returns success (0) or negative errno. 2369 */ 2370 int clk_set_min_rate(struct clk *clk, unsigned long rate) 2371 { 2372 if (!clk) 2373 return 0; 2374 2375 return clk_set_rate_range(clk, rate, clk->max_rate); 2376 } 2377 EXPORT_SYMBOL_GPL(clk_set_min_rate); 2378 2379 /** 2380 * clk_set_max_rate - set a maximum clock rate for a clock source 2381 * @clk: clock source 2382 * @rate: desired maximum clock rate in Hz, inclusive 2383 * 2384 * Returns success (0) or negative errno. 2385 */ 2386 int clk_set_max_rate(struct clk *clk, unsigned long rate) 2387 { 2388 if (!clk) 2389 return 0; 2390 2391 return clk_set_rate_range(clk, clk->min_rate, rate); 2392 } 2393 EXPORT_SYMBOL_GPL(clk_set_max_rate); 2394 2395 /** 2396 * clk_get_parent - return the parent of a clk 2397 * @clk: the clk whose parent gets returned 2398 * 2399 * Simply returns clk->parent. Returns NULL if clk is NULL. 2400 */ 2401 struct clk *clk_get_parent(struct clk *clk) 2402 { 2403 struct clk *parent; 2404 2405 if (!clk) 2406 return NULL; 2407 2408 clk_prepare_lock(); 2409 /* TODO: Create a per-user clk and change callers to call clk_put */ 2410 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; 2411 clk_prepare_unlock(); 2412 2413 return parent; 2414 } 2415 EXPORT_SYMBOL_GPL(clk_get_parent); 2416 2417 static struct clk_core *__clk_init_parent(struct clk_core *core) 2418 { 2419 u8 index = 0; 2420 2421 if (core->num_parents > 1 && core->ops->get_parent) 2422 index = core->ops->get_parent(core->hw); 2423 2424 return clk_core_get_parent_by_index(core, index); 2425 } 2426 2427 static void clk_core_reparent(struct clk_core *core, 2428 struct clk_core *new_parent) 2429 { 2430 clk_reparent(core, new_parent); 2431 __clk_recalc_accuracies(core); 2432 __clk_recalc_rates(core, POST_RATE_CHANGE); 2433 } 2434 2435 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) 2436 { 2437 if (!hw) 2438 return; 2439 2440 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); 2441 } 2442 2443 /** 2444 * clk_has_parent - check if a clock is a possible parent for another 2445 * @clk: clock source 2446 * @parent: parent clock source 2447 * 2448 * This function can be used in drivers that need to check that a clock can be 2449 * the parent of another without actually changing the parent. 2450 * 2451 * Returns true if @parent is a possible parent for @clk, false otherwise. 2452 */ 2453 bool clk_has_parent(struct clk *clk, struct clk *parent) 2454 { 2455 struct clk_core *core, *parent_core; 2456 int i; 2457 2458 /* NULL clocks should be nops, so return success if either is NULL. 
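 *
 * Illustrative use (the clock names are hypothetical): check a candidate
 * parent before attempting the switch, e.g.
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);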
*/ 2459 if (!clk || !parent) 2460 return true; 2461 2462 core = clk->core; 2463 parent_core = parent->core; 2464 2465 /* Optimize for the case where the parent is already the parent. */ 2466 if (core->parent == parent_core) 2467 return true; 2468 2469 for (i = 0; i < core->num_parents; i++) 2470 if (!strcmp(core->parents[i].name, parent_core->name)) 2471 return true; 2472 2473 return false; 2474 } 2475 EXPORT_SYMBOL_GPL(clk_has_parent); 2476 2477 static int clk_core_set_parent_nolock(struct clk_core *core, 2478 struct clk_core *parent) 2479 { 2480 int ret = 0; 2481 int p_index = 0; 2482 unsigned long p_rate = 0; 2483 2484 lockdep_assert_held(&prepare_lock); 2485 2486 if (!core) 2487 return 0; 2488 2489 if (core->parent == parent) 2490 return 0; 2491 2492 /* verify ops for multi-parent clks */ 2493 if (core->num_parents > 1 && !core->ops->set_parent) 2494 return -EPERM; 2495 2496 /* check that we are allowed to re-parent if the clock is in use */ 2497 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) 2498 return -EBUSY; 2499 2500 if (clk_core_rate_is_protected(core)) 2501 return -EBUSY; 2502 2503 /* try finding the new parent index */ 2504 if (parent) { 2505 p_index = clk_fetch_parent_index(core, parent); 2506 if (p_index < 0) { 2507 pr_debug("%s: clk %s can not be parent of clk %s\n", 2508 __func__, parent->name, core->name); 2509 return p_index; 2510 } 2511 p_rate = parent->rate; 2512 } 2513 2514 ret = clk_pm_runtime_get(core); 2515 if (ret) 2516 return ret; 2517 2518 /* propagate PRE_RATE_CHANGE notifications */ 2519 ret = __clk_speculate_rates(core, p_rate); 2520 2521 /* abort if a driver objects */ 2522 if (ret & NOTIFY_STOP_MASK) 2523 goto runtime_put; 2524 2525 /* do the re-parent */ 2526 ret = __clk_set_parent(core, parent, p_index); 2527 2528 /* propagate rate an accuracy recalculation accordingly */ 2529 if (ret) { 2530 __clk_recalc_rates(core, ABORT_RATE_CHANGE); 2531 } else { 2532 __clk_recalc_rates(core, POST_RATE_CHANGE); 2533 __clk_recalc_accuracies(core); 2534 } 2535 2536 runtime_put: 2537 clk_pm_runtime_put(core); 2538 2539 return ret; 2540 } 2541 2542 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) 2543 { 2544 return clk_core_set_parent_nolock(hw->core, parent->core); 2545 } 2546 EXPORT_SYMBOL_GPL(clk_hw_set_parent); 2547 2548 /** 2549 * clk_set_parent - switch the parent of a mux clk 2550 * @clk: the mux clk whose input we are switching 2551 * @parent: the new input to clk 2552 * 2553 * Re-parent clk to use parent as its new input source. If clk is in 2554 * prepared state, the clk will get enabled for the duration of this call. If 2555 * that's not acceptable for a specific clk (Eg: the consumer can't handle 2556 * that, the reparenting is glitchy in hardware, etc), use the 2557 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 2558 * 2559 * After successfully changing clk's parent clk_set_parent will update the 2560 * clk topology, sysfs topology and propagate rate recalculation via 2561 * __clk_recalc_rates. 2562 * 2563 * Returns 0 on success, -EERROR otherwise. 2564 */ 2565 int clk_set_parent(struct clk *clk, struct clk *parent) 2566 { 2567 int ret; 2568 2569 if (!clk) 2570 return 0; 2571 2572 clk_prepare_lock(); 2573 2574 if (clk->exclusive_count) 2575 clk_core_rate_unprotect(clk->core); 2576 2577 ret = clk_core_set_parent_nolock(clk->core, 2578 parent ? 
parent->core : NULL); 2579 2580 if (clk->exclusive_count) 2581 clk_core_rate_protect(clk->core); 2582 2583 clk_prepare_unlock(); 2584 2585 return ret; 2586 } 2587 EXPORT_SYMBOL_GPL(clk_set_parent); 2588 2589 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) 2590 { 2591 int ret = -EINVAL; 2592 2593 lockdep_assert_held(&prepare_lock); 2594 2595 if (!core) 2596 return 0; 2597 2598 if (clk_core_rate_is_protected(core)) 2599 return -EBUSY; 2600 2601 trace_clk_set_phase(core, degrees); 2602 2603 if (core->ops->set_phase) { 2604 ret = core->ops->set_phase(core->hw, degrees); 2605 if (!ret) 2606 core->phase = degrees; 2607 } 2608 2609 trace_clk_set_phase_complete(core, degrees); 2610 2611 return ret; 2612 } 2613 2614 /** 2615 * clk_set_phase - adjust the phase shift of a clock signal 2616 * @clk: clock signal source 2617 * @degrees: number of degrees the signal is shifted 2618 * 2619 * Shifts the phase of a clock signal by the specified 2620 * degrees. Returns 0 on success, -EERROR otherwise. 2621 * 2622 * This function makes no distinction about the input or reference 2623 * signal that we adjust the clock signal phase against. For example 2624 * phase locked-loop clock signal generators we may shift phase with 2625 * respect to feedback clock signal input, but for other cases the 2626 * clock phase may be shifted with respect to some other, unspecified 2627 * signal. 2628 * 2629 * Additionally the concept of phase shift does not propagate through 2630 * the clock tree hierarchy, which sets it apart from clock rates and 2631 * clock accuracy. A parent clock phase attribute does not have an 2632 * impact on the phase attribute of a child clock. 2633 */ 2634 int clk_set_phase(struct clk *clk, int degrees) 2635 { 2636 int ret; 2637 2638 if (!clk) 2639 return 0; 2640 2641 /* sanity check degrees */ 2642 degrees %= 360; 2643 if (degrees < 0) 2644 degrees += 360; 2645 2646 clk_prepare_lock(); 2647 2648 if (clk->exclusive_count) 2649 clk_core_rate_unprotect(clk->core); 2650 2651 ret = clk_core_set_phase_nolock(clk->core, degrees); 2652 2653 if (clk->exclusive_count) 2654 clk_core_rate_protect(clk->core); 2655 2656 clk_prepare_unlock(); 2657 2658 return ret; 2659 } 2660 EXPORT_SYMBOL_GPL(clk_set_phase); 2661 2662 static int clk_core_get_phase(struct clk_core *core) 2663 { 2664 int ret; 2665 2666 lockdep_assert_held(&prepare_lock); 2667 if (!core->ops->get_phase) 2668 return 0; 2669 2670 /* Always try to update cached phase if possible */ 2671 ret = core->ops->get_phase(core->hw); 2672 if (ret >= 0) 2673 core->phase = ret; 2674 2675 return ret; 2676 } 2677 2678 /** 2679 * clk_get_phase - return the phase shift of a clock signal 2680 * @clk: clock signal source 2681 * 2682 * Returns the phase shift of a clock node in degrees, otherwise returns 2683 * -EERROR. 
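 *
 * Illustrative usage ("sclk" and the device pointer are hypothetical): shift
 * the sample clock by a quarter period and read the cached value back:
 *
 *	ret = clk_set_phase(sclk, 90);
 *	if (!ret)
 *		dev_dbg(dev, "phase is now %d degrees\n", clk_get_phase(sclk));
 *
 * Values passed to clk_set_phase() are normalized into [0, 359], so for
 * example -90 is treated as 270.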
2684 */ 2685 int clk_get_phase(struct clk *clk) 2686 { 2687 int ret; 2688 2689 if (!clk) 2690 return 0; 2691 2692 clk_prepare_lock(); 2693 ret = clk_core_get_phase(clk->core); 2694 clk_prepare_unlock(); 2695 2696 return ret; 2697 } 2698 EXPORT_SYMBOL_GPL(clk_get_phase); 2699 2700 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) 2701 { 2702 /* Assume a default value of 50% */ 2703 core->duty.num = 1; 2704 core->duty.den = 2; 2705 } 2706 2707 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); 2708 2709 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) 2710 { 2711 struct clk_duty *duty = &core->duty; 2712 int ret = 0; 2713 2714 if (!core->ops->get_duty_cycle) 2715 return clk_core_update_duty_cycle_parent_nolock(core); 2716 2717 ret = core->ops->get_duty_cycle(core->hw, duty); 2718 if (ret) 2719 goto reset; 2720 2721 /* Don't trust the clock provider too much */ 2722 if (duty->den == 0 || duty->num > duty->den) { 2723 ret = -EINVAL; 2724 goto reset; 2725 } 2726 2727 return 0; 2728 2729 reset: 2730 clk_core_reset_duty_cycle_nolock(core); 2731 return ret; 2732 } 2733 2734 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) 2735 { 2736 int ret = 0; 2737 2738 if (core->parent && 2739 core->flags & CLK_DUTY_CYCLE_PARENT) { 2740 ret = clk_core_update_duty_cycle_nolock(core->parent); 2741 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2742 } else { 2743 clk_core_reset_duty_cycle_nolock(core); 2744 } 2745 2746 return ret; 2747 } 2748 2749 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2750 struct clk_duty *duty); 2751 2752 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, 2753 struct clk_duty *duty) 2754 { 2755 int ret; 2756 2757 lockdep_assert_held(&prepare_lock); 2758 2759 if (clk_core_rate_is_protected(core)) 2760 return -EBUSY; 2761 2762 trace_clk_set_duty_cycle(core, duty); 2763 2764 if (!core->ops->set_duty_cycle) 2765 return clk_core_set_duty_cycle_parent_nolock(core, duty); 2766 2767 ret = core->ops->set_duty_cycle(core->hw, duty); 2768 if (!ret) 2769 memcpy(&core->duty, duty, sizeof(*duty)); 2770 2771 trace_clk_set_duty_cycle_complete(core, duty); 2772 2773 return ret; 2774 } 2775 2776 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, 2777 struct clk_duty *duty) 2778 { 2779 int ret = 0; 2780 2781 if (core->parent && 2782 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { 2783 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); 2784 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); 2785 } 2786 2787 return ret; 2788 } 2789 2790 /** 2791 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal 2792 * @clk: clock signal source 2793 * @num: numerator of the duty cycle ratio to be applied 2794 * @den: denominator of the duty cycle ratio to be applied 2795 * 2796 * Apply the duty cycle ratio if the ratio is valid and the clock can 2797 * perform this operation 2798 * 2799 * Returns (0) on success, a negative errno otherwise. 
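 *
 * Illustrative usage ("pwm_clk" is a hypothetical clock): request a 25%
 * duty cycle and read it back scaled to a percentage:
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 4);
 *	if (!ret)
 *		pct = clk_get_scaled_duty_cycle(pwm_clk, 100);
 *
 * A ratio with den == 0 or num > den is rejected with -EINVAL before the
 * provider is ever called.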
2800 */ 2801 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) 2802 { 2803 int ret; 2804 struct clk_duty duty; 2805 2806 if (!clk) 2807 return 0; 2808 2809 /* sanity check the ratio */ 2810 if (den == 0 || num > den) 2811 return -EINVAL; 2812 2813 duty.num = num; 2814 duty.den = den; 2815 2816 clk_prepare_lock(); 2817 2818 if (clk->exclusive_count) 2819 clk_core_rate_unprotect(clk->core); 2820 2821 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); 2822 2823 if (clk->exclusive_count) 2824 clk_core_rate_protect(clk->core); 2825 2826 clk_prepare_unlock(); 2827 2828 return ret; 2829 } 2830 EXPORT_SYMBOL_GPL(clk_set_duty_cycle); 2831 2832 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, 2833 unsigned int scale) 2834 { 2835 struct clk_duty *duty = &core->duty; 2836 int ret; 2837 2838 clk_prepare_lock(); 2839 2840 ret = clk_core_update_duty_cycle_nolock(core); 2841 if (!ret) 2842 ret = mult_frac(scale, duty->num, duty->den); 2843 2844 clk_prepare_unlock(); 2845 2846 return ret; 2847 } 2848 2849 /** 2850 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal 2851 * @clk: clock signal source 2852 * @scale: scaling factor to be applied to represent the ratio as an integer 2853 * 2854 * Returns the duty cycle ratio of a clock node multiplied by the provided 2855 * scaling factor, or negative errno on error. 2856 */ 2857 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) 2858 { 2859 if (!clk) 2860 return 0; 2861 2862 return clk_core_get_scaled_duty_cycle(clk->core, scale); 2863 } 2864 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); 2865 2866 /** 2867 * clk_is_match - check if two clk's point to the same hardware clock 2868 * @p: clk compared against q 2869 * @q: clk compared against p 2870 * 2871 * Returns true if the two struct clk pointers both point to the same hardware 2872 * clock node. Put differently, returns true if struct clk *p and struct clk *q 2873 * share the same struct clk_core object. 2874 * 2875 * Returns false otherwise. Note that two NULL clks are treated as matching. 2876 */ 2877 bool clk_is_match(const struct clk *p, const struct clk *q) 2878 { 2879 /* trivial case: identical struct clk's or both NULL */ 2880 if (p == q) 2881 return true; 2882 2883 /* true if clk->core pointers match. 
Avoid dereferencing garbage */ 2884 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) 2885 if (p->core == q->core) 2886 return true; 2887 2888 return false; 2889 } 2890 EXPORT_SYMBOL_GPL(clk_is_match); 2891 2892 /*** debugfs support ***/ 2893 2894 #ifdef CONFIG_DEBUG_FS 2895 #include <linux/debugfs.h> 2896 2897 static struct dentry *rootdir; 2898 static int inited = 0; 2899 static DEFINE_MUTEX(clk_debug_lock); 2900 static HLIST_HEAD(clk_debug_list); 2901 2902 static struct hlist_head *orphan_list[] = { 2903 &clk_orphan_list, 2904 NULL, 2905 }; 2906 2907 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, 2908 int level) 2909 { 2910 int phase; 2911 2912 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", 2913 level * 3 + 1, "", 2914 30 - level * 3, c->name, 2915 c->enable_count, c->prepare_count, c->protect_count, 2916 clk_core_get_rate_recalc(c), 2917 clk_core_get_accuracy_recalc(c)); 2918 2919 phase = clk_core_get_phase(c); 2920 if (phase >= 0) 2921 seq_printf(s, "%5d", phase); 2922 else 2923 seq_puts(s, "-----"); 2924 2925 seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000)); 2926 } 2927 2928 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 2929 int level) 2930 { 2931 struct clk_core *child; 2932 2933 clk_summary_show_one(s, c, level); 2934 2935 hlist_for_each_entry(child, &c->children, child_node) 2936 clk_summary_show_subtree(s, child, level + 1); 2937 } 2938 2939 static int clk_summary_show(struct seq_file *s, void *data) 2940 { 2941 struct clk_core *c; 2942 struct hlist_head **lists = (struct hlist_head **)s->private; 2943 2944 seq_puts(s, " enable prepare protect duty\n"); 2945 seq_puts(s, " clock count count count rate accuracy phase cycle\n"); 2946 seq_puts(s, "---------------------------------------------------------------------------------------------\n"); 2947 2948 clk_prepare_lock(); 2949 2950 for (; *lists; lists++) 2951 hlist_for_each_entry(c, *lists, child_node) 2952 clk_summary_show_subtree(s, c, 0); 2953 2954 clk_prepare_unlock(); 2955 2956 return 0; 2957 } 2958 DEFINE_SHOW_ATTRIBUTE(clk_summary); 2959 2960 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 2961 { 2962 int phase; 2963 unsigned long min_rate, max_rate; 2964 2965 clk_core_get_boundaries(c, &min_rate, &max_rate); 2966 2967 /* This should be JSON format, i.e. 
elements separated with a comma */ 2968 seq_printf(s, "\"%s\": { ", c->name); 2969 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 2970 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 2971 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 2972 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); 2973 seq_printf(s, "\"min_rate\": %lu,", min_rate); 2974 seq_printf(s, "\"max_rate\": %lu,", max_rate); 2975 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); 2976 phase = clk_core_get_phase(c); 2977 if (phase >= 0) 2978 seq_printf(s, "\"phase\": %d,", phase); 2979 seq_printf(s, "\"duty_cycle\": %u", 2980 clk_core_get_scaled_duty_cycle(c, 100000)); 2981 } 2982 2983 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) 2984 { 2985 struct clk_core *child; 2986 2987 clk_dump_one(s, c, level); 2988 2989 hlist_for_each_entry(child, &c->children, child_node) { 2990 seq_putc(s, ','); 2991 clk_dump_subtree(s, child, level + 1); 2992 } 2993 2994 seq_putc(s, '}'); 2995 } 2996 2997 static int clk_dump_show(struct seq_file *s, void *data) 2998 { 2999 struct clk_core *c; 3000 bool first_node = true; 3001 struct hlist_head **lists = (struct hlist_head **)s->private; 3002 3003 seq_putc(s, '{'); 3004 clk_prepare_lock(); 3005 3006 for (; *lists; lists++) { 3007 hlist_for_each_entry(c, *lists, child_node) { 3008 if (!first_node) 3009 seq_putc(s, ','); 3010 first_node = false; 3011 clk_dump_subtree(s, c, 0); 3012 } 3013 } 3014 3015 clk_prepare_unlock(); 3016 3017 seq_puts(s, "}\n"); 3018 return 0; 3019 } 3020 DEFINE_SHOW_ATTRIBUTE(clk_dump); 3021 3022 #undef CLOCK_ALLOW_WRITE_DEBUGFS 3023 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS 3024 /* 3025 * This can be dangerous, therefore don't provide any real compile time 3026 * configuration option for this feature. 3027 * People who want to use this will need to modify the source code directly. 
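 *
 * With the #undef above turned into a #define, the per-clock clk_rate file
 * becomes writable (mode 0644) and a rate can be forced from userspace, for
 * example (the clock name and the usual debugfs mount point are
 * illustrative):
 *
 *	echo 24000000 > /sys/kernel/debug/clk/uart_sclk/clk_rate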
3028 */ 3029 static int clk_rate_set(void *data, u64 val) 3030 { 3031 struct clk_core *core = data; 3032 int ret; 3033 3034 clk_prepare_lock(); 3035 ret = clk_core_set_rate_nolock(core, val); 3036 clk_prepare_unlock(); 3037 3038 return ret; 3039 } 3040 3041 #define clk_rate_mode 0644 3042 #else 3043 #define clk_rate_set NULL 3044 #define clk_rate_mode 0444 3045 #endif 3046 3047 static int clk_rate_get(void *data, u64 *val) 3048 { 3049 struct clk_core *core = data; 3050 3051 *val = core->rate; 3052 return 0; 3053 } 3054 3055 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n"); 3056 3057 static const struct { 3058 unsigned long flag; 3059 const char *name; 3060 } clk_flags[] = { 3061 #define ENTRY(f) { f, #f } 3062 ENTRY(CLK_SET_RATE_GATE), 3063 ENTRY(CLK_SET_PARENT_GATE), 3064 ENTRY(CLK_SET_RATE_PARENT), 3065 ENTRY(CLK_IGNORE_UNUSED), 3066 ENTRY(CLK_GET_RATE_NOCACHE), 3067 ENTRY(CLK_SET_RATE_NO_REPARENT), 3068 ENTRY(CLK_GET_ACCURACY_NOCACHE), 3069 ENTRY(CLK_RECALC_NEW_RATES), 3070 ENTRY(CLK_SET_RATE_UNGATE), 3071 ENTRY(CLK_IS_CRITICAL), 3072 ENTRY(CLK_OPS_PARENT_ENABLE), 3073 ENTRY(CLK_DUTY_CYCLE_PARENT), 3074 #undef ENTRY 3075 }; 3076 3077 static int clk_flags_show(struct seq_file *s, void *data) 3078 { 3079 struct clk_core *core = s->private; 3080 unsigned long flags = core->flags; 3081 unsigned int i; 3082 3083 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { 3084 if (flags & clk_flags[i].flag) { 3085 seq_printf(s, "%s\n", clk_flags[i].name); 3086 flags &= ~clk_flags[i].flag; 3087 } 3088 } 3089 if (flags) { 3090 /* Unknown flags */ 3091 seq_printf(s, "0x%lx\n", flags); 3092 } 3093 3094 return 0; 3095 } 3096 DEFINE_SHOW_ATTRIBUTE(clk_flags); 3097 3098 static void possible_parent_show(struct seq_file *s, struct clk_core *core, 3099 unsigned int i, char terminator) 3100 { 3101 struct clk_core *parent; 3102 3103 /* 3104 * Go through the following options to fetch a parent's name. 3105 * 3106 * 1. Fetch the registered parent clock and use its name 3107 * 2. Use the global (fallback) name if specified 3108 * 3. Use the local fw_name if provided 3109 * 4. Fetch parent clock's clock-output-name if DT index was set 3110 * 3111 * This may still fail in some cases, such as when the parent is 3112 * specified directly via a struct clk_hw pointer, but it isn't 3113 * registered (yet). 
3114 */ 3115 parent = clk_core_get_parent_by_index(core, i); 3116 if (parent) 3117 seq_puts(s, parent->name); 3118 else if (core->parents[i].name) 3119 seq_puts(s, core->parents[i].name); 3120 else if (core->parents[i].fw_name) 3121 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); 3122 else if (core->parents[i].index >= 0) 3123 seq_puts(s, 3124 of_clk_get_parent_name(core->of_node, 3125 core->parents[i].index)); 3126 else 3127 seq_puts(s, "(missing)"); 3128 3129 seq_putc(s, terminator); 3130 } 3131 3132 static int possible_parents_show(struct seq_file *s, void *data) 3133 { 3134 struct clk_core *core = s->private; 3135 int i; 3136 3137 for (i = 0; i < core->num_parents - 1; i++) 3138 possible_parent_show(s, core, i, ' '); 3139 3140 possible_parent_show(s, core, i, '\n'); 3141 3142 return 0; 3143 } 3144 DEFINE_SHOW_ATTRIBUTE(possible_parents); 3145 3146 static int current_parent_show(struct seq_file *s, void *data) 3147 { 3148 struct clk_core *core = s->private; 3149 3150 if (core->parent) 3151 seq_printf(s, "%s\n", core->parent->name); 3152 3153 return 0; 3154 } 3155 DEFINE_SHOW_ATTRIBUTE(current_parent); 3156 3157 static int clk_duty_cycle_show(struct seq_file *s, void *data) 3158 { 3159 struct clk_core *core = s->private; 3160 struct clk_duty *duty = &core->duty; 3161 3162 seq_printf(s, "%u/%u\n", duty->num, duty->den); 3163 3164 return 0; 3165 } 3166 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); 3167 3168 static int clk_min_rate_show(struct seq_file *s, void *data) 3169 { 3170 struct clk_core *core = s->private; 3171 unsigned long min_rate, max_rate; 3172 3173 clk_prepare_lock(); 3174 clk_core_get_boundaries(core, &min_rate, &max_rate); 3175 clk_prepare_unlock(); 3176 seq_printf(s, "%lu\n", min_rate); 3177 3178 return 0; 3179 } 3180 DEFINE_SHOW_ATTRIBUTE(clk_min_rate); 3181 3182 static int clk_max_rate_show(struct seq_file *s, void *data) 3183 { 3184 struct clk_core *core = s->private; 3185 unsigned long min_rate, max_rate; 3186 3187 clk_prepare_lock(); 3188 clk_core_get_boundaries(core, &min_rate, &max_rate); 3189 clk_prepare_unlock(); 3190 seq_printf(s, "%lu\n", max_rate); 3191 3192 return 0; 3193 } 3194 DEFINE_SHOW_ATTRIBUTE(clk_max_rate); 3195 3196 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) 3197 { 3198 struct dentry *root; 3199 3200 if (!core || !pdentry) 3201 return; 3202 3203 root = debugfs_create_dir(core->name, pdentry); 3204 core->dentry = root; 3205 3206 debugfs_create_file("clk_rate", clk_rate_mode, root, core, 3207 &clk_rate_fops); 3208 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); 3209 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); 3210 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); 3211 debugfs_create_u32("clk_phase", 0444, root, &core->phase); 3212 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); 3213 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); 3214 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); 3215 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); 3216 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); 3217 debugfs_create_file("clk_duty_cycle", 0444, root, core, 3218 &clk_duty_cycle_fops); 3219 3220 if (core->num_parents > 0) 3221 debugfs_create_file("clk_parent", 0444, root, core, 3222 ¤t_parent_fops); 3223 3224 if (core->num_parents > 1) 3225 debugfs_create_file("clk_possible_parents", 0444, root, core, 3226 &possible_parents_fops); 
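	/*
	 * The entries created above end up under the per-clock directory
	 * <debugfs>/clk/<clk name>/ (see clk_debug_init() for the "clk"
	 * root). All of them are read-only except clk_rate, which is
	 * writable only when CLOCK_ALLOW_WRITE_DEBUGFS is defined above.
	 */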
3227 3228 if (core->ops->debug_init) 3229 core->ops->debug_init(core->hw, core->dentry); 3230 } 3231 3232 /** 3233 * clk_debug_register - add a clk node to the debugfs clk directory 3234 * @core: the clk being added to the debugfs clk directory 3235 * 3236 * Dynamically adds a clk to the debugfs clk directory if debugfs has been 3237 * initialized. Otherwise it bails out early since the debugfs clk directory 3238 * will be created lazily by clk_debug_init as part of a late_initcall. 3239 */ 3240 static void clk_debug_register(struct clk_core *core) 3241 { 3242 mutex_lock(&clk_debug_lock); 3243 hlist_add_head(&core->debug_node, &clk_debug_list); 3244 if (inited) 3245 clk_debug_create_one(core, rootdir); 3246 mutex_unlock(&clk_debug_lock); 3247 } 3248 3249 /** 3250 * clk_debug_unregister - remove a clk node from the debugfs clk directory 3251 * @core: the clk being removed from the debugfs clk directory 3252 * 3253 * Dynamically removes a clk and all its child nodes from the 3254 * debugfs clk directory if clk->dentry points to debugfs created by 3255 * clk_debug_register in __clk_core_init. 3256 */ 3257 static void clk_debug_unregister(struct clk_core *core) 3258 { 3259 mutex_lock(&clk_debug_lock); 3260 hlist_del_init(&core->debug_node); 3261 debugfs_remove_recursive(core->dentry); 3262 core->dentry = NULL; 3263 mutex_unlock(&clk_debug_lock); 3264 } 3265 3266 /** 3267 * clk_debug_init - lazily populate the debugfs clk directory 3268 * 3269 * clks are often initialized very early during boot before memory can be 3270 * dynamically allocated and well before debugfs is setup. This function 3271 * populates the debugfs clk directory once at boot-time when we know that 3272 * debugfs is setup. It should only be called once at boot-time, all other clks 3273 * added dynamically will be done so with clk_debug_register. 3274 */ 3275 static int __init clk_debug_init(void) 3276 { 3277 struct clk_core *core; 3278 3279 rootdir = debugfs_create_dir("clk", NULL); 3280 3281 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, 3282 &clk_summary_fops); 3283 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, 3284 &clk_dump_fops); 3285 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, 3286 &clk_summary_fops); 3287 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, 3288 &clk_dump_fops); 3289 3290 mutex_lock(&clk_debug_lock); 3291 hlist_for_each_entry(core, &clk_debug_list, debug_node) 3292 clk_debug_create_one(core, rootdir); 3293 3294 inited = 1; 3295 mutex_unlock(&clk_debug_lock); 3296 3297 return 0; 3298 } 3299 late_initcall(clk_debug_init); 3300 #else 3301 static inline void clk_debug_register(struct clk_core *core) { } 3302 static inline void clk_debug_unregister(struct clk_core *core) 3303 { 3304 } 3305 #endif 3306 3307 static void clk_core_reparent_orphans_nolock(void) 3308 { 3309 struct clk_core *orphan; 3310 struct hlist_node *tmp2; 3311 3312 /* 3313 * walk the list of orphan clocks and reparent any that newly finds a 3314 * parent. 3315 */ 3316 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 3317 struct clk_core *parent = __clk_init_parent(orphan); 3318 3319 /* 3320 * We need to use __clk_set_parent_before() and _after() to 3321 * to properly migrate any prepare/enable count of the orphan 3322 * clock. This is important for CLK_IS_CRITICAL clocks, which 3323 * are enabled during init but might not have a parent yet. 
3324 */ 3325 if (parent) { 3326 /* update the clk tree topology */ 3327 __clk_set_parent_before(orphan, parent); 3328 __clk_set_parent_after(orphan, parent, NULL); 3329 __clk_recalc_accuracies(orphan); 3330 __clk_recalc_rates(orphan, 0); 3331 } 3332 } 3333 } 3334 3335 /** 3336 * __clk_core_init - initialize the data structures in a struct clk_core 3337 * @core: clk_core being initialized 3338 * 3339 * Initializes the lists in struct clk_core, queries the hardware for the 3340 * parent and rate and sets them both. 3341 */ 3342 static int __clk_core_init(struct clk_core *core) 3343 { 3344 int ret; 3345 struct clk_core *parent; 3346 unsigned long rate; 3347 int phase; 3348 3349 if (!core) 3350 return -EINVAL; 3351 3352 clk_prepare_lock(); 3353 3354 ret = clk_pm_runtime_get(core); 3355 if (ret) 3356 goto unlock; 3357 3358 /* check to see if a clock with this name is already registered */ 3359 if (clk_core_lookup(core->name)) { 3360 pr_debug("%s: clk %s already initialized\n", 3361 __func__, core->name); 3362 ret = -EEXIST; 3363 goto out; 3364 } 3365 3366 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ 3367 if (core->ops->set_rate && 3368 !((core->ops->round_rate || core->ops->determine_rate) && 3369 core->ops->recalc_rate)) { 3370 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 3371 __func__, core->name); 3372 ret = -EINVAL; 3373 goto out; 3374 } 3375 3376 if (core->ops->set_parent && !core->ops->get_parent) { 3377 pr_err("%s: %s must implement .get_parent & .set_parent\n", 3378 __func__, core->name); 3379 ret = -EINVAL; 3380 goto out; 3381 } 3382 3383 if (core->num_parents > 1 && !core->ops->get_parent) { 3384 pr_err("%s: %s must implement .get_parent as it has multi parents\n", 3385 __func__, core->name); 3386 ret = -EINVAL; 3387 goto out; 3388 } 3389 3390 if (core->ops->set_rate_and_parent && 3391 !(core->ops->set_parent && core->ops->set_rate)) { 3392 pr_err("%s: %s must implement .set_parent & .set_rate\n", 3393 __func__, core->name); 3394 ret = -EINVAL; 3395 goto out; 3396 } 3397 3398 /* 3399 * optional platform-specific magic 3400 * 3401 * The .init callback is not used by any of the basic clock types, but 3402 * exists for weird hardware that must perform initialization magic for 3403 * CCF to get an accurate view of the clock for any other callbacks. It may 3404 * also be used when the provider needs to perform dynamic allocations. Such allocations 3405 * must be freed in the terminate() callback. 3406 * This callback shall not be used to initialize the clock's parameters, 3407 * such as rate, parent, etc. 3408 * 3409 * If it exists, this callback should be called before any other callback of 3410 * the clock. 3411 */ 3412 if (core->ops->init) { 3413 ret = core->ops->init(core->hw); 3414 if (ret) 3415 goto out; 3416 } 3417 3418 parent = core->parent = __clk_init_parent(core); 3419 3420 /* 3421 * Populate core->parent if parent has already been clk_core_init'd. If 3422 * parent has not yet been clk_core_init'd then place clk in the orphan 3423 * list. If clk doesn't have any parents then place it in the root 3424 * clk list. 3425 * 3426 * Every time a new clk is clk_init'd then we walk the list of orphan 3427 * clocks and re-parent any that are children of the clock currently 3428 * being clk_init'd.
3429 */ 3430 if (parent) { 3431 hlist_add_head(&core->child_node, &parent->children); 3432 core->orphan = parent->orphan; 3433 } else if (!core->num_parents) { 3434 hlist_add_head(&core->child_node, &clk_root_list); 3435 core->orphan = false; 3436 } else { 3437 hlist_add_head(&core->child_node, &clk_orphan_list); 3438 core->orphan = true; 3439 } 3440 3441 /* 3442 * Set clk's accuracy. The preferred method is to use 3443 * .recalc_accuracy. For simple clocks and lazy developers the default 3444 * fallback is to use the parent's accuracy. If a clock doesn't have a 3445 * parent (or is orphaned) then accuracy is set to zero (perfect 3446 * clock). 3447 */ 3448 if (core->ops->recalc_accuracy) 3449 core->accuracy = core->ops->recalc_accuracy(core->hw, 3450 clk_core_get_accuracy_no_lock(parent)); 3451 else if (parent) 3452 core->accuracy = parent->accuracy; 3453 else 3454 core->accuracy = 0; 3455 3456 /* 3457 * Set clk's phase by clk_core_get_phase() caching the phase. 3458 * Since a phase is by definition relative to its parent, just 3459 * query the current clock phase, or just assume it's in phase. 3460 */ 3461 phase = clk_core_get_phase(core); 3462 if (phase < 0) { 3463 ret = phase; 3464 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, 3465 core->name); 3466 goto out; 3467 } 3468 3469 /* 3470 * Set clk's duty cycle. 3471 */ 3472 clk_core_update_duty_cycle_nolock(core); 3473 3474 /* 3475 * Set clk's rate. The preferred method is to use .recalc_rate. For 3476 * simple clocks and lazy developers the default fallback is to use the 3477 * parent's rate. If a clock doesn't have a parent (or is orphaned) 3478 * then rate is set to zero. 3479 */ 3480 if (core->ops->recalc_rate) 3481 rate = core->ops->recalc_rate(core->hw, 3482 clk_core_get_rate_nolock(parent)); 3483 else if (parent) 3484 rate = parent->rate; 3485 else 3486 rate = 0; 3487 core->rate = core->req_rate = rate; 3488 3489 /* 3490 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks 3491 * don't get accidentally disabled when walking the orphan tree and 3492 * reparenting clocks 3493 */ 3494 if (core->flags & CLK_IS_CRITICAL) { 3495 unsigned long flags; 3496 3497 ret = clk_core_prepare(core); 3498 if (ret) { 3499 pr_warn("%s: critical clk '%s' failed to prepare\n", 3500 __func__, core->name); 3501 goto out; 3502 } 3503 3504 flags = clk_enable_lock(); 3505 ret = clk_core_enable(core); 3506 clk_enable_unlock(flags); 3507 if (ret) { 3508 pr_warn("%s: critical clk '%s' failed to enable\n", 3509 __func__, core->name); 3510 clk_core_unprepare(core); 3511 goto out; 3512 } 3513 } 3514 3515 clk_core_reparent_orphans_nolock(); 3516 3517 3518 kref_init(&core->ref); 3519 out: 3520 clk_pm_runtime_put(core); 3521 unlock: 3522 if (ret) 3523 hlist_del_init(&core->child_node); 3524 3525 clk_prepare_unlock(); 3526 3527 if (!ret) 3528 clk_debug_register(core); 3529 3530 return ret; 3531 } 3532 3533 /** 3534 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core 3535 * @core: clk to add consumer to 3536 * @clk: consumer to link to a clk 3537 */ 3538 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) 3539 { 3540 clk_prepare_lock(); 3541 hlist_add_head(&clk->clks_node, &core->clks); 3542 clk_prepare_unlock(); 3543 } 3544 3545 /** 3546 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core 3547 * @clk: consumer to unlink 3548 */ 3549 static void clk_core_unlink_consumer(struct clk *clk) 3550 { 3551 lockdep_assert_held(&prepare_lock); 3552 
hlist_del(&clk->clks_node); 3553 } 3554 3555 /** 3556 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core 3557 * @core: clk to allocate a consumer for 3558 * @dev_id: string describing device name 3559 * @con_id: connection ID string on device 3560 * 3561 * Returns: clk consumer left unlinked from the consumer list 3562 */ 3563 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, 3564 const char *con_id) 3565 { 3566 struct clk *clk; 3567 3568 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 3569 if (!clk) 3570 return ERR_PTR(-ENOMEM); 3571 3572 clk->core = core; 3573 clk->dev_id = dev_id; 3574 clk->con_id = kstrdup_const(con_id, GFP_KERNEL); 3575 clk->max_rate = ULONG_MAX; 3576 3577 return clk; 3578 } 3579 3580 /** 3581 * free_clk - Free a clk consumer 3582 * @clk: clk consumer to free 3583 * 3584 * Note, this assumes the clk has been unlinked from the clk_core consumer 3585 * list. 3586 */ 3587 static void free_clk(struct clk *clk) 3588 { 3589 kfree_const(clk->con_id); 3590 kfree(clk); 3591 } 3592 3593 /** 3594 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given 3595 * a clk_hw 3596 * @dev: clk consumer device 3597 * @hw: clk_hw associated with the clk being consumed 3598 * @dev_id: string describing device name 3599 * @con_id: connection ID string on device 3600 * 3601 * This is the main function used to create a clk pointer for use by clk 3602 * consumers. It connects a consumer to the clk_core and clk_hw structures 3603 * used by the framework and clk provider respectively. 3604 */ 3605 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, 3606 const char *dev_id, const char *con_id) 3607 { 3608 struct clk *clk; 3609 struct clk_core *core; 3610 3611 /* This is to allow this function to be chained to others */ 3612 if (IS_ERR_OR_NULL(hw)) 3613 return ERR_CAST(hw); 3614 3615 core = hw->core; 3616 clk = alloc_clk(core, dev_id, con_id); 3617 if (IS_ERR(clk)) 3618 return clk; 3619 clk->dev = dev; 3620 3621 if (!try_module_get(core->owner)) { 3622 free_clk(clk); 3623 return ERR_PTR(-ENOENT); 3624 } 3625 3626 kref_get(&core->ref); 3627 clk_core_link_consumer(core, clk); 3628 3629 return clk; 3630 } 3631 3632 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) 3633 { 3634 const char *dst; 3635 3636 if (!src) { 3637 if (must_exist) 3638 return -EINVAL; 3639 return 0; 3640 } 3641 3642 *dst_p = dst = kstrdup_const(src, GFP_KERNEL); 3643 if (!dst) 3644 return -ENOMEM; 3645 3646 return 0; 3647 } 3648 3649 static int clk_core_populate_parent_map(struct clk_core *core, 3650 const struct clk_init_data *init) 3651 { 3652 u8 num_parents = init->num_parents; 3653 const char * const *parent_names = init->parent_names; 3654 const struct clk_hw **parent_hws = init->parent_hws; 3655 const struct clk_parent_data *parent_data = init->parent_data; 3656 int i, ret = 0; 3657 struct clk_parent_map *parents, *parent; 3658 3659 if (!num_parents) 3660 return 0; 3661 3662 /* 3663 * Avoid unnecessary string look-ups of clk_core's possible parents by 3664 * having a cache of names/clk_hw pointers to clk_core pointers. 
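 *
 * A provider typically describes these parents in its clk_init_data, for
 * instance (illustrative only, the names are hypothetical):
 *
 *	static const struct clk_parent_data mux_parents[] = {
 *		{ .fw_name = "xtal", .name = "osc24m" },
 *		{ .hw = &pll_div2.hw },
 *	};
 *
 * Each entry is copied below into a struct clk_parent_map; the matching
 * clk_core pointer is resolved and cached lazily on first lookup.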
3665 */ 3666 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); 3667 core->parents = parents; 3668 if (!parents) 3669 return -ENOMEM; 3670 3671 /* Copy everything over because it might be __initdata */ 3672 for (i = 0, parent = parents; i < num_parents; i++, parent++) { 3673 parent->index = -1; 3674 if (parent_names) { 3675 /* throw a WARN if any entries are NULL */ 3676 WARN(!parent_names[i], 3677 "%s: invalid NULL in %s's .parent_names\n", 3678 __func__, core->name); 3679 ret = clk_cpy_name(&parent->name, parent_names[i], 3680 true); 3681 } else if (parent_data) { 3682 parent->hw = parent_data[i].hw; 3683 parent->index = parent_data[i].index; 3684 ret = clk_cpy_name(&parent->fw_name, 3685 parent_data[i].fw_name, false); 3686 if (!ret) 3687 ret = clk_cpy_name(&parent->name, 3688 parent_data[i].name, 3689 false); 3690 } else if (parent_hws) { 3691 parent->hw = parent_hws[i]; 3692 } else { 3693 ret = -EINVAL; 3694 WARN(1, "Must specify parents if num_parents > 0\n"); 3695 } 3696 3697 if (ret) { 3698 do { 3699 kfree_const(parents[i].name); 3700 kfree_const(parents[i].fw_name); 3701 } while (--i >= 0); 3702 kfree(parents); 3703 3704 return ret; 3705 } 3706 } 3707 3708 return 0; 3709 } 3710 3711 static void clk_core_free_parent_map(struct clk_core *core) 3712 { 3713 int i = core->num_parents; 3714 3715 if (!core->num_parents) 3716 return; 3717 3718 while (--i >= 0) { 3719 kfree_const(core->parents[i].name); 3720 kfree_const(core->parents[i].fw_name); 3721 } 3722 3723 kfree(core->parents); 3724 } 3725 3726 static struct clk * 3727 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) 3728 { 3729 int ret; 3730 struct clk_core *core; 3731 const struct clk_init_data *init = hw->init; 3732 3733 /* 3734 * The init data is not supposed to be used outside of registration path. 3735 * Set it to NULL so that provider drivers can't use it either and so that 3736 * we catch use of hw->init early on in the core. 3737 */ 3738 hw->init = NULL; 3739 3740 core = kzalloc(sizeof(*core), GFP_KERNEL); 3741 if (!core) { 3742 ret = -ENOMEM; 3743 goto fail_out; 3744 } 3745 3746 core->name = kstrdup_const(init->name, GFP_KERNEL); 3747 if (!core->name) { 3748 ret = -ENOMEM; 3749 goto fail_name; 3750 } 3751 3752 if (WARN_ON(!init->ops)) { 3753 ret = -EINVAL; 3754 goto fail_ops; 3755 } 3756 core->ops = init->ops; 3757 3758 if (dev && pm_runtime_enabled(dev)) 3759 core->rpm_enabled = true; 3760 core->dev = dev; 3761 core->of_node = np; 3762 if (dev && dev->driver) 3763 core->owner = dev->driver->owner; 3764 core->hw = hw; 3765 core->flags = init->flags; 3766 core->num_parents = init->num_parents; 3767 core->min_rate = 0; 3768 core->max_rate = ULONG_MAX; 3769 hw->core = core; 3770 3771 ret = clk_core_populate_parent_map(core, init); 3772 if (ret) 3773 goto fail_parents; 3774 3775 INIT_HLIST_HEAD(&core->clks); 3776 3777 /* 3778 * Don't call clk_hw_create_clk() here because that would pin the 3779 * provider module to itself and prevent it from ever being removed. 
3780 */ 3781 hw->clk = alloc_clk(core, NULL, NULL); 3782 if (IS_ERR(hw->clk)) { 3783 ret = PTR_ERR(hw->clk); 3784 goto fail_create_clk; 3785 } 3786 3787 clk_core_link_consumer(hw->core, hw->clk); 3788 3789 ret = __clk_core_init(core); 3790 if (!ret) 3791 return hw->clk; 3792 3793 clk_prepare_lock(); 3794 clk_core_unlink_consumer(hw->clk); 3795 clk_prepare_unlock(); 3796 3797 free_clk(hw->clk); 3798 hw->clk = NULL; 3799 3800 fail_create_clk: 3801 clk_core_free_parent_map(core); 3802 fail_parents: 3803 fail_ops: 3804 kfree_const(core->name); 3805 fail_name: 3806 kfree(core); 3807 fail_out: 3808 return ERR_PTR(ret); 3809 } 3810 3811 /** 3812 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent 3813 * @dev: Device to get device node of 3814 * 3815 * Return: device node pointer of @dev, or the device node pointer of 3816 * @dev->parent if dev doesn't have a device node, or NULL if neither 3817 * @dev or @dev->parent have a device node. 3818 */ 3819 static struct device_node *dev_or_parent_of_node(struct device *dev) 3820 { 3821 struct device_node *np; 3822 3823 if (!dev) 3824 return NULL; 3825 3826 np = dev_of_node(dev); 3827 if (!np) 3828 np = dev_of_node(dev->parent); 3829 3830 return np; 3831 } 3832 3833 /** 3834 * clk_register - allocate a new clock, register it and return an opaque cookie 3835 * @dev: device that is registering this clock 3836 * @hw: link to hardware-specific clock data 3837 * 3838 * clk_register is the *deprecated* interface for populating the clock tree with 3839 * new clock nodes. Use clk_hw_register() instead. 3840 * 3841 * Returns: a pointer to the newly allocated struct clk which 3842 * cannot be dereferenced by driver code but may be used in conjunction with the 3843 * rest of the clock API. In the event of an error clk_register will return an 3844 * error code; drivers must test for an error code after calling clk_register. 3845 */ 3846 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 3847 { 3848 return __clk_register(dev, dev_or_parent_of_node(dev), hw); 3849 } 3850 EXPORT_SYMBOL_GPL(clk_register); 3851 3852 /** 3853 * clk_hw_register - register a clk_hw and return an error code 3854 * @dev: device that is registering this clock 3855 * @hw: link to hardware-specific clock data 3856 * 3857 * clk_hw_register is the primary interface for populating the clock tree with 3858 * new clock nodes. It returns an integer equal to zero indicating success or 3859 * less than zero indicating failure. Drivers must test for an error code after 3860 * calling clk_hw_register(). 3861 */ 3862 int clk_hw_register(struct device *dev, struct clk_hw *hw) 3863 { 3864 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev), 3865 hw)); 3866 } 3867 EXPORT_SYMBOL_GPL(clk_hw_register); 3868 3869 /* 3870 * of_clk_hw_register - register a clk_hw and return an error code 3871 * @node: device_node of device that is registering this clock 3872 * @hw: link to hardware-specific clock data 3873 * 3874 * of_clk_hw_register() is the primary interface for populating the clock tree 3875 * with new clock nodes when a struct device is not available, but a struct 3876 * device_node is. It returns an integer equal to zero indicating success or 3877 * less than zero indicating failure. Drivers must test for an error code after 3878 * calling of_clk_hw_register(). 
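 *
 * A minimal provider sketch (the clock name, the ops structure and the
 * device_node are illustrative, not taken from this file):
 *
 *	static const struct clk_init_data init = {
 *		.name = "board_osc",
 *		.ops = &board_osc_ops,
 *	};
 *	static struct clk_hw osc_hw = { .init = &init };
 *
 *	ret = of_clk_hw_register(np, &osc_hw);
 *
 * where board_osc_ops is the provider's clk_ops. On success hw->init is
 * cleared and hw->core/hw->clk are set up by __clk_register(); the returned
 * error code must be checked by the caller.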
3879 */ 3880 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) 3881 { 3882 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); 3883 } 3884 EXPORT_SYMBOL_GPL(of_clk_hw_register); 3885 3886 /* Free memory allocated for a clock. */ 3887 static void __clk_release(struct kref *ref) 3888 { 3889 struct clk_core *core = container_of(ref, struct clk_core, ref); 3890 3891 lockdep_assert_held(&prepare_lock); 3892 3893 clk_core_free_parent_map(core); 3894 kfree_const(core->name); 3895 kfree(core); 3896 } 3897 3898 /* 3899 * Empty clk_ops for unregistered clocks. These are used temporarily 3900 * after clk_unregister() was called on a clock and until last clock 3901 * consumer calls clk_put() and the struct clk object is freed. 3902 */ 3903 static int clk_nodrv_prepare_enable(struct clk_hw *hw) 3904 { 3905 return -ENXIO; 3906 } 3907 3908 static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 3909 { 3910 WARN_ON_ONCE(1); 3911 } 3912 3913 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 3914 unsigned long parent_rate) 3915 { 3916 return -ENXIO; 3917 } 3918 3919 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 3920 { 3921 return -ENXIO; 3922 } 3923 3924 static const struct clk_ops clk_nodrv_ops = { 3925 .enable = clk_nodrv_prepare_enable, 3926 .disable = clk_nodrv_disable_unprepare, 3927 .prepare = clk_nodrv_prepare_enable, 3928 .unprepare = clk_nodrv_disable_unprepare, 3929 .set_rate = clk_nodrv_set_rate, 3930 .set_parent = clk_nodrv_set_parent, 3931 }; 3932 3933 static void clk_core_evict_parent_cache_subtree(struct clk_core *root, 3934 struct clk_core *target) 3935 { 3936 int i; 3937 struct clk_core *child; 3938 3939 for (i = 0; i < root->num_parents; i++) 3940 if (root->parents[i].core == target) 3941 root->parents[i].core = NULL; 3942 3943 hlist_for_each_entry(child, &root->children, child_node) 3944 clk_core_evict_parent_cache_subtree(child, target); 3945 } 3946 3947 /* Remove this clk from all parent caches */ 3948 static void clk_core_evict_parent_cache(struct clk_core *core) 3949 { 3950 struct hlist_head **lists; 3951 struct clk_core *root; 3952 3953 lockdep_assert_held(&prepare_lock); 3954 3955 for (lists = all_lists; *lists; lists++) 3956 hlist_for_each_entry(root, *lists, child_node) 3957 clk_core_evict_parent_cache_subtree(root, core); 3958 3959 } 3960 3961 /** 3962 * clk_unregister - unregister a currently registered clock 3963 * @clk: clock to unregister 3964 */ 3965 void clk_unregister(struct clk *clk) 3966 { 3967 unsigned long flags; 3968 const struct clk_ops *ops; 3969 3970 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 3971 return; 3972 3973 clk_debug_unregister(clk->core); 3974 3975 clk_prepare_lock(); 3976 3977 ops = clk->core->ops; 3978 if (ops == &clk_nodrv_ops) { 3979 pr_err("%s: unregistered clock: %s\n", __func__, 3980 clk->core->name); 3981 goto unlock; 3982 } 3983 /* 3984 * Assign empty clock ops for consumers that might still hold 3985 * a reference to this clock. 3986 */ 3987 flags = clk_enable_lock(); 3988 clk->core->ops = &clk_nodrv_ops; 3989 clk_enable_unlock(flags); 3990 3991 if (ops->terminate) 3992 ops->terminate(clk->core->hw); 3993 3994 if (!hlist_empty(&clk->core->children)) { 3995 struct clk_core *child; 3996 struct hlist_node *t; 3997 3998 /* Reparent all children to the orphan list. 
*/ 3999 hlist_for_each_entry_safe(child, t, &clk->core->children, 4000 child_node) 4001 clk_core_set_parent_nolock(child, NULL); 4002 } 4003 4004 clk_core_evict_parent_cache(clk->core); 4005 4006 hlist_del_init(&clk->core->child_node); 4007 4008 if (clk->core->prepare_count) 4009 pr_warn("%s: unregistering prepared clock: %s\n", 4010 __func__, clk->core->name); 4011 4012 if (clk->core->protect_count) 4013 pr_warn("%s: unregistering protected clock: %s\n", 4014 __func__, clk->core->name); 4015 4016 kref_put(&clk->core->ref, __clk_release); 4017 free_clk(clk); 4018 unlock: 4019 clk_prepare_unlock(); 4020 } 4021 EXPORT_SYMBOL_GPL(clk_unregister); 4022 4023 /** 4024 * clk_hw_unregister - unregister a currently registered clk_hw 4025 * @hw: hardware-specific clock data to unregister 4026 */ 4027 void clk_hw_unregister(struct clk_hw *hw) 4028 { 4029 clk_unregister(hw->clk); 4030 } 4031 EXPORT_SYMBOL_GPL(clk_hw_unregister); 4032 4033 static void devm_clk_release(struct device *dev, void *res) 4034 { 4035 clk_unregister(*(struct clk **)res); 4036 } 4037 4038 static void devm_clk_hw_release(struct device *dev, void *res) 4039 { 4040 clk_hw_unregister(*(struct clk_hw **)res); 4041 } 4042 4043 /** 4044 * devm_clk_register - resource managed clk_register() 4045 * @dev: device that is registering this clock 4046 * @hw: link to hardware-specific clock data 4047 * 4048 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. 4049 * 4050 * Clocks returned from this function are automatically clk_unregister()ed on 4051 * driver detach. See clk_register() for more information. 4052 */ 4053 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 4054 { 4055 struct clk *clk; 4056 struct clk **clkp; 4057 4058 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 4059 if (!clkp) 4060 return ERR_PTR(-ENOMEM); 4061 4062 clk = clk_register(dev, hw); 4063 if (!IS_ERR(clk)) { 4064 *clkp = clk; 4065 devres_add(dev, clkp); 4066 } else { 4067 devres_free(clkp); 4068 } 4069 4070 return clk; 4071 } 4072 EXPORT_SYMBOL_GPL(devm_clk_register); 4073 4074 /** 4075 * devm_clk_hw_register - resource managed clk_hw_register() 4076 * @dev: device that is registering this clock 4077 * @hw: link to hardware-specific clock data 4078 * 4079 * Managed clk_hw_register(). Clocks registered by this function are 4080 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() 4081 * for more information. 4082 */ 4083 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) 4084 { 4085 struct clk_hw **hwp; 4086 int ret; 4087 4088 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); 4089 if (!hwp) 4090 return -ENOMEM; 4091 4092 ret = clk_hw_register(dev, hw); 4093 if (!ret) { 4094 *hwp = hw; 4095 devres_add(dev, hwp); 4096 } else { 4097 devres_free(hwp); 4098 } 4099 4100 return ret; 4101 } 4102 EXPORT_SYMBOL_GPL(devm_clk_hw_register); 4103 4104 static int devm_clk_match(struct device *dev, void *res, void *data) 4105 { 4106 struct clk *c = res; 4107 if (WARN_ON(!c)) 4108 return 0; 4109 return c == data; 4110 } 4111 4112 static int devm_clk_hw_match(struct device *dev, void *res, void *data) 4113 { 4114 struct clk_hw *hw = res; 4115 4116 if (WARN_ON(!hw)) 4117 return 0; 4118 return hw == data; 4119 } 4120 4121 /** 4122 * devm_clk_unregister - resource managed clk_unregister() 4123 * @clk: clock to unregister 4124 * 4125 * Deallocate a clock allocated with devm_clk_register(). 
Normally 4126 * this function will not need to be called and the resource management 4127 * code will ensure that the resource is freed. 4128 */ 4129 void devm_clk_unregister(struct device *dev, struct clk *clk) 4130 { 4131 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 4132 } 4133 EXPORT_SYMBOL_GPL(devm_clk_unregister); 4134 4135 /** 4136 * devm_clk_hw_unregister - resource managed clk_hw_unregister() 4137 * @dev: device that is unregistering the hardware-specific clock data 4138 * @hw: link to hardware-specific clock data 4139 * 4140 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally 4141 * this function will not need to be called and the resource management 4142 * code will ensure that the resource is freed. 4143 */ 4144 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) 4145 { 4146 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, 4147 hw)); 4148 } 4149 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); 4150 4151 /* 4152 * clkdev helpers 4153 */ 4154 4155 void __clk_put(struct clk *clk) 4156 { 4157 struct module *owner; 4158 4159 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 4160 return; 4161 4162 clk_prepare_lock(); 4163 4164 /* 4165 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a 4166 * given user should be balanced with calls to clk_rate_exclusive_put() 4167 * and by that same consumer 4168 */ 4169 if (WARN_ON(clk->exclusive_count)) { 4170 /* We voiced our concern, let's sanitize the situation */ 4171 clk->core->protect_count -= (clk->exclusive_count - 1); 4172 clk_core_rate_unprotect(clk->core); 4173 clk->exclusive_count = 0; 4174 } 4175 4176 hlist_del(&clk->clks_node); 4177 if (clk->min_rate > clk->core->req_rate || 4178 clk->max_rate < clk->core->req_rate) 4179 clk_core_set_rate_nolock(clk->core, clk->core->req_rate); 4180 4181 owner = clk->core->owner; 4182 kref_put(&clk->core->ref, __clk_release); 4183 4184 clk_prepare_unlock(); 4185 4186 module_put(owner); 4187 4188 free_clk(clk); 4189 } 4190 4191 /*** clk rate change notifiers ***/ 4192 4193 /** 4194 * clk_notifier_register - add a clk rate change notifier 4195 * @clk: struct clk * to watch 4196 * @nb: struct notifier_block * with callback info 4197 * 4198 * Request notification when clk's rate changes. This uses an SRCU 4199 * notifier because we want it to block and notifier unregistrations are 4200 * uncommon. The callbacks associated with the notifier must not 4201 * re-enter into the clk framework by calling any top-level clk APIs; 4202 * this will cause a nested prepare_lock mutex. 4203 * 4204 * In all notification cases (pre, post and abort rate change) the original 4205 * clock rate is passed to the callback via struct clk_notifier_data.old_rate 4206 * and the new frequency is passed via struct clk_notifier_data.new_rate. 4207 * 4208 * clk_notifier_register() must be called from non-atomic context. 4209 * Returns -EINVAL if called with null arguments, -ENOMEM upon 4210 * allocation failure; otherwise, passes along the return value of 4211 * srcu_notifier_chain_register(). 
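 *
 * As a rough sketch of use (the callback name, struct foo and its
 * nb/clk members are illustrative, not part of this API):
 *
 *	static int foo_clk_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->nb.notifier_call = foo_clk_cb;
 *	ret = clk_notifier_register(foo->clk, &foo->nb);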
4212 */ 4213 int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 4214 { 4215 struct clk_notifier *cn; 4216 int ret = -ENOMEM; 4217 4218 if (!clk || !nb) 4219 return -EINVAL; 4220 4221 clk_prepare_lock(); 4222 4223 /* search the list of notifiers for this clk */ 4224 list_for_each_entry(cn, &clk_notifier_list, node) 4225 if (cn->clk == clk) 4226 break; 4227 4228 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 4229 if (cn->clk != clk) { 4230 cn = kzalloc(sizeof(*cn), GFP_KERNEL); 4231 if (!cn) 4232 goto out; 4233 4234 cn->clk = clk; 4235 srcu_init_notifier_head(&cn->notifier_head); 4236 4237 list_add(&cn->node, &clk_notifier_list); 4238 } 4239 4240 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 4241 4242 clk->core->notifier_count++; 4243 4244 out: 4245 clk_prepare_unlock(); 4246 4247 return ret; 4248 } 4249 EXPORT_SYMBOL_GPL(clk_notifier_register); 4250 4251 /** 4252 * clk_notifier_unregister - remove a clk rate change notifier 4253 * @clk: struct clk * 4254 * @nb: struct notifier_block * with callback info 4255 * 4256 * Request no further notification for changes to 'clk' and frees memory 4257 * allocated in clk_notifier_register. 4258 * 4259 * Returns -EINVAL if called with null arguments; otherwise, passes 4260 * along the return value of srcu_notifier_chain_unregister(). 4261 */ 4262 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 4263 { 4264 struct clk_notifier *cn = NULL; 4265 int ret = -EINVAL; 4266 4267 if (!clk || !nb) 4268 return -EINVAL; 4269 4270 clk_prepare_lock(); 4271 4272 list_for_each_entry(cn, &clk_notifier_list, node) 4273 if (cn->clk == clk) 4274 break; 4275 4276 if (cn->clk == clk) { 4277 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 4278 4279 clk->core->notifier_count--; 4280 4281 /* XXX the notifier code should handle this better */ 4282 if (!cn->notifier_head.head) { 4283 srcu_cleanup_notifier_head(&cn->notifier_head); 4284 list_del(&cn->node); 4285 kfree(cn); 4286 } 4287 4288 } else { 4289 ret = -ENOENT; 4290 } 4291 4292 clk_prepare_unlock(); 4293 4294 return ret; 4295 } 4296 EXPORT_SYMBOL_GPL(clk_notifier_unregister); 4297 4298 #ifdef CONFIG_OF 4299 static void clk_core_reparent_orphans(void) 4300 { 4301 clk_prepare_lock(); 4302 clk_core_reparent_orphans_nolock(); 4303 clk_prepare_unlock(); 4304 } 4305 4306 /** 4307 * struct of_clk_provider - Clock provider registration structure 4308 * @link: Entry in global list of clock providers 4309 * @node: Pointer to device tree node of clock provider 4310 * @get: Get clock callback. 
Returns NULL or a struct clk for the 4311 * given clock specifier 4312 * @data: context pointer to be passed into @get callback 4313 */ 4314 struct of_clk_provider { 4315 struct list_head link; 4316 4317 struct device_node *node; 4318 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 4319 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); 4320 void *data; 4321 }; 4322 4323 extern struct of_device_id __clk_of_table; 4324 static const struct of_device_id __clk_of_table_sentinel 4325 __used __section(__clk_of_table_end); 4326 4327 static LIST_HEAD(of_clk_providers); 4328 static DEFINE_MUTEX(of_clk_mutex); 4329 4330 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 4331 void *data) 4332 { 4333 return data; 4334 } 4335 EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 4336 4337 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) 4338 { 4339 return data; 4340 } 4341 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); 4342 4343 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 4344 { 4345 struct clk_onecell_data *clk_data = data; 4346 unsigned int idx = clkspec->args[0]; 4347 4348 if (idx >= clk_data->clk_num) { 4349 pr_err("%s: invalid clock index %u\n", __func__, idx); 4350 return ERR_PTR(-EINVAL); 4351 } 4352 4353 return clk_data->clks[idx]; 4354 } 4355 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 4356 4357 struct clk_hw * 4358 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) 4359 { 4360 struct clk_hw_onecell_data *hw_data = data; 4361 unsigned int idx = clkspec->args[0]; 4362 4363 if (idx >= hw_data->num) { 4364 pr_err("%s: invalid index %u\n", __func__, idx); 4365 return ERR_PTR(-EINVAL); 4366 } 4367 4368 return hw_data->hws[idx]; 4369 } 4370 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); 4371 4372 /** 4373 * of_clk_add_provider() - Register a clock provider for a node 4374 * @np: Device node pointer associated with clock provider 4375 * @clk_src_get: callback for decoding clock 4376 * @data: context pointer for @clk_src_get callback. 4377 * 4378 * This function is *deprecated*. Use of_clk_add_hw_provider() instead. 4379 */ 4380 int of_clk_add_provider(struct device_node *np, 4381 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, 4382 void *data), 4383 void *data) 4384 { 4385 struct of_clk_provider *cp; 4386 int ret; 4387 4388 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4389 if (!cp) 4390 return -ENOMEM; 4391 4392 cp->node = of_node_get(np); 4393 cp->data = data; 4394 cp->get = clk_src_get; 4395 4396 mutex_lock(&of_clk_mutex); 4397 list_add(&cp->link, &of_clk_providers); 4398 mutex_unlock(&of_clk_mutex); 4399 pr_debug("Added clock from %pOF\n", np); 4400 4401 clk_core_reparent_orphans(); 4402 4403 ret = of_clk_set_defaults(np, true); 4404 if (ret < 0) 4405 of_clk_del_provider(np); 4406 4407 return ret; 4408 } 4409 EXPORT_SYMBOL_GPL(of_clk_add_provider); 4410 4411 /** 4412 * of_clk_add_hw_provider() - Register a clock provider for a node 4413 * @np: Device node pointer associated with clock provider 4414 * @get: callback for decoding clk_hw 4415 * @data: context pointer for @get callback. 
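 *
 * A provider exposing several clocks will typically pass
 * of_clk_hw_onecell_get() together with a struct clk_hw_onecell_data.
 * A rough sketch, with error handling elided (my_hw_data, MY_NR_CLKS
 * and my_pll are illustrative names, not part of this API):
 *
 *	my_hw_data = kzalloc(struct_size(my_hw_data, hws, MY_NR_CLKS),
 *			     GFP_KERNEL);
 *	my_hw_data->num = MY_NR_CLKS;
 *	my_hw_data->hws[0] = &my_pll.hw;
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, my_hw_data);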
4416 */ 4417 int of_clk_add_hw_provider(struct device_node *np, 4418 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4419 void *data), 4420 void *data) 4421 { 4422 struct of_clk_provider *cp; 4423 int ret; 4424 4425 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 4426 if (!cp) 4427 return -ENOMEM; 4428 4429 cp->node = of_node_get(np); 4430 cp->data = data; 4431 cp->get_hw = get; 4432 4433 mutex_lock(&of_clk_mutex); 4434 list_add(&cp->link, &of_clk_providers); 4435 mutex_unlock(&of_clk_mutex); 4436 pr_debug("Added clk_hw provider from %pOF\n", np); 4437 4438 clk_core_reparent_orphans(); 4439 4440 ret = of_clk_set_defaults(np, true); 4441 if (ret < 0) 4442 of_clk_del_provider(np); 4443 4444 return ret; 4445 } 4446 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); 4447 4448 static void devm_of_clk_release_provider(struct device *dev, void *res) 4449 { 4450 of_clk_del_provider(*(struct device_node **)res); 4451 } 4452 4453 /* 4454 * We allow a child device to use its parent device as the clock provider node 4455 * for cases like MFD sub-devices where the child device driver wants to use 4456 * devm_*() APIs but not list the device in DT as a sub-node. 4457 */ 4458 static struct device_node *get_clk_provider_node(struct device *dev) 4459 { 4460 struct device_node *np, *parent_np; 4461 4462 np = dev->of_node; 4463 parent_np = dev->parent ? dev->parent->of_node : NULL; 4464 4465 if (!of_find_property(np, "#clock-cells", NULL)) 4466 if (of_find_property(parent_np, "#clock-cells", NULL)) 4467 np = parent_np; 4468 4469 return np; 4470 } 4471 4472 /** 4473 * devm_of_clk_add_hw_provider() - Managed clk provider node registration 4474 * @dev: Device acting as the clock provider (used for DT node and lifetime) 4475 * @get: callback for decoding clk_hw 4476 * @data: context pointer for @get callback 4477 * 4478 * Registers a clock provider for the given device's node. If the device has 4479 * no DT node, or if its node lacks clock provider information (a #clock-cells 4480 * property), the parent device's node is checked instead. If the parent node 4481 * has a #clock-cells property, it is used for the registration. The provider 4482 * is automatically released at device exit. 4483 * 4484 * Return: 0 on success or an errno on failure.
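 *
 * A minimal sketch of use from a platform driver probe() routine, with
 * error handling of the provider data elided; priv->hw is assumed to be
 * an already registered struct clk_hw, which of_clk_hw_simple_get()
 * hands back unchanged:
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
 *					  &priv->hw);
 *	if (ret)
 *		return ret;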
4485 */ 4486 int devm_of_clk_add_hw_provider(struct device *dev, 4487 struct clk_hw *(*get)(struct of_phandle_args *clkspec, 4488 void *data), 4489 void *data) 4490 { 4491 struct device_node **ptr, *np; 4492 int ret; 4493 4494 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), 4495 GFP_KERNEL); 4496 if (!ptr) 4497 return -ENOMEM; 4498 4499 np = get_clk_provider_node(dev); 4500 ret = of_clk_add_hw_provider(np, get, data); 4501 if (!ret) { 4502 *ptr = np; 4503 devres_add(dev, ptr); 4504 } else { 4505 devres_free(ptr); 4506 } 4507 4508 return ret; 4509 } 4510 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); 4511 4512 /** 4513 * of_clk_del_provider() - Remove a previously registered clock provider 4514 * @np: Device node pointer associated with clock provider 4515 */ 4516 void of_clk_del_provider(struct device_node *np) 4517 { 4518 struct of_clk_provider *cp; 4519 4520 mutex_lock(&of_clk_mutex); 4521 list_for_each_entry(cp, &of_clk_providers, link) { 4522 if (cp->node == np) { 4523 list_del(&cp->link); 4524 of_node_put(cp->node); 4525 kfree(cp); 4526 break; 4527 } 4528 } 4529 mutex_unlock(&of_clk_mutex); 4530 } 4531 EXPORT_SYMBOL_GPL(of_clk_del_provider); 4532 4533 static int devm_clk_provider_match(struct device *dev, void *res, void *data) 4534 { 4535 struct device_node **np = res; 4536 4537 if (WARN_ON(!np || !*np)) 4538 return 0; 4539 4540 return *np == data; 4541 } 4542 4543 /** 4544 * devm_of_clk_del_provider() - Remove clock provider registered using devm 4545 * @dev: Device to whose lifetime the clock provider was bound 4546 */ 4547 void devm_of_clk_del_provider(struct device *dev) 4548 { 4549 int ret; 4550 struct device_node *np = get_clk_provider_node(dev); 4551 4552 ret = devres_release(dev, devm_of_clk_release_provider, 4553 devm_clk_provider_match, np); 4554 4555 WARN_ON(ret); 4556 } 4557 EXPORT_SYMBOL(devm_of_clk_del_provider); 4558 4559 /** 4560 * of_parse_clkspec() - Parse a DT clock specifier for a given device node 4561 * @np: device node to parse clock specifier from 4562 * @index: index of phandle to parse clock out of. If index < 0, @name is used 4563 * @name: clock name to find and parse. If name is NULL, the index is used 4564 * @out_args: Result of parsing the clock specifier 4565 * 4566 * Parses a device node's "clocks" and "clock-names" properties to find the 4567 * phandle and cells for the index or name that is desired. The resulting clock 4568 * specifier is placed into @out_args, or an errno is returned when there's a 4569 * parsing error. The @index argument is ignored if @name is non-NULL. 4570 * 4571 * Example: 4572 * 4573 * phandle1: clock-controller@1 { 4574 * #clock-cells = <2>; 4575 * } 4576 * 4577 * phandle2: clock-controller@2 { 4578 * #clock-cells = <1>; 4579 * } 4580 * 4581 * clock-consumer@3 { 4582 * clocks = <&phandle1 1 2 &phandle2 3>; 4583 * clock-names = "name1", "name2"; 4584 * } 4585 * 4586 * To get a device_node for `clock-controller@2' node you may call this 4587 * function a few different ways: 4588 * 4589 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); 4590 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); 4591 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); 4592 * 4593 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT 4594 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in 4595 * the "clock-names" property of @np. 
4596 */ 4597 static int of_parse_clkspec(const struct device_node *np, int index, 4598 const char *name, struct of_phandle_args *out_args) 4599 { 4600 int ret = -ENOENT; 4601 4602 /* Walk up the tree of devices looking for a clock property that matches */ 4603 while (np) { 4604 /* 4605 * For named clocks, first look up the name in the 4606 * "clock-names" property. If it cannot be found, then index 4607 * will be an error code and of_parse_phandle_with_args() will 4608 * return -EINVAL. 4609 */ 4610 if (name) 4611 index = of_property_match_string(np, "clock-names", name); 4612 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 4613 index, out_args); 4614 if (!ret) 4615 break; 4616 if (name && index >= 0) 4617 break; 4618 4619 /* 4620 * No matching clock found on this node. If the parent node 4621 * has a "clock-ranges" property, then we can try one of its 4622 * clocks. 4623 */ 4624 np = np->parent; 4625 if (np && !of_get_property(np, "clock-ranges", NULL)) 4626 break; 4627 index = 0; 4628 } 4629 4630 return ret; 4631 } 4632 4633 static struct clk_hw * 4634 __of_clk_get_hw_from_provider(struct of_clk_provider *provider, 4635 struct of_phandle_args *clkspec) 4636 { 4637 struct clk *clk; 4638 4639 if (provider->get_hw) 4640 return provider->get_hw(clkspec, provider->data); 4641 4642 clk = provider->get(clkspec, provider->data); 4643 if (IS_ERR(clk)) 4644 return ERR_CAST(clk); 4645 return __clk_get_hw(clk); 4646 } 4647 4648 static struct clk_hw * 4649 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) 4650 { 4651 struct of_clk_provider *provider; 4652 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); 4653 4654 if (!clkspec) 4655 return ERR_PTR(-EINVAL); 4656 4657 mutex_lock(&of_clk_mutex); 4658 list_for_each_entry(provider, &of_clk_providers, link) { 4659 if (provider->node == clkspec->np) { 4660 hw = __of_clk_get_hw_from_provider(provider, clkspec); 4661 if (!IS_ERR(hw)) 4662 break; 4663 } 4664 } 4665 mutex_unlock(&of_clk_mutex); 4666 4667 return hw; 4668 } 4669 4670 /** 4671 * of_clk_get_from_provider() - Lookup a clock from a clock provider 4672 * @clkspec: pointer to a clock specifier data structure 4673 * 4674 * This function looks up a struct clk from the registered list of clock 4675 * providers, an input is a clock specifier data structure as returned 4676 * from the of_parse_phandle_with_args() function call. 
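 *
 * A minimal sketch of use, with error handling elided (consumer_np is
 * whatever consumer node the caller already holds):
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *
 *	of_parse_phandle_with_args(consumer_np, "clocks", "#clock-cells",
 *				   0, &clkspec);
 *	clk = of_clk_get_from_provider(&clkspec);
 *	of_node_put(clkspec.np);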
4677 */ 4678 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 4679 { 4680 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); 4681 4682 return clk_hw_create_clk(NULL, hw, NULL, __func__); 4683 } 4684 EXPORT_SYMBOL_GPL(of_clk_get_from_provider); 4685 4686 struct clk_hw *of_clk_get_hw(struct device_node *np, int index, 4687 const char *con_id) 4688 { 4689 int ret; 4690 struct clk_hw *hw; 4691 struct of_phandle_args clkspec; 4692 4693 ret = of_parse_clkspec(np, index, con_id, &clkspec); 4694 if (ret) 4695 return ERR_PTR(ret); 4696 4697 hw = of_clk_get_hw_from_clkspec(&clkspec); 4698 of_node_put(clkspec.np); 4699 4700 return hw; 4701 } 4702 4703 static struct clk *__of_clk_get(struct device_node *np, 4704 int index, const char *dev_id, 4705 const char *con_id) 4706 { 4707 struct clk_hw *hw = of_clk_get_hw(np, index, con_id); 4708 4709 return clk_hw_create_clk(NULL, hw, dev_id, con_id); 4710 } 4711 4712 struct clk *of_clk_get(struct device_node *np, int index) 4713 { 4714 return __of_clk_get(np, index, np->full_name, NULL); 4715 } 4716 EXPORT_SYMBOL(of_clk_get); 4717 4718 /** 4719 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node 4720 * @np: pointer to clock consumer node 4721 * @name: name of consumer's clock input, or NULL for the first clock reference 4722 * 4723 * This function parses the clocks and clock-names properties, 4724 * and uses them to look up the struct clk from the registered list of clock 4725 * providers. 4726 */ 4727 struct clk *of_clk_get_by_name(struct device_node *np, const char *name) 4728 { 4729 if (!np) 4730 return ERR_PTR(-ENOENT); 4731 4732 return __of_clk_get(np, 0, np->full_name, name); 4733 } 4734 EXPORT_SYMBOL(of_clk_get_by_name); 4735 4736 /** 4737 * of_clk_get_parent_count() - Count the number of clocks a device node has 4738 * @np: device node to count 4739 * 4740 * Returns: The number of clocks that are possible parents of this node 4741 */ 4742 unsigned int of_clk_get_parent_count(const struct device_node *np) 4743 { 4744 int count; 4745 4746 count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); 4747 if (count < 0) 4748 return 0; 4749 4750 return count; 4751 } 4752 EXPORT_SYMBOL_GPL(of_clk_get_parent_count); 4753 4754 const char *of_clk_get_parent_name(const struct device_node *np, int index) 4755 { 4756 struct of_phandle_args clkspec; 4757 struct property *prop; 4758 const char *clk_name; 4759 const __be32 *vp; 4760 u32 pv; 4761 int rc; 4762 int count; 4763 struct clk *clk; 4764 4765 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, 4766 &clkspec); 4767 if (rc) 4768 return NULL; 4769 4770 index = clkspec.args_count ? clkspec.args[0] : 0; 4771 count = 0; 4772 4773 /* if there is an indices property, use it to transfer the index 4774 * specified into an array offset for the clock-output-names property. 4775 */ 4776 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { 4777 if (index == pv) { 4778 index = count; 4779 break; 4780 } 4781 count++; 4782 } 4783 /* We went off the end of 'clock-indices' without finding it */ 4784 if (prop && !vp) 4785 return NULL; 4786 4787 if (of_property_read_string_index(clkspec.np, "clock-output-names", 4788 index, 4789 &clk_name) < 0) { 4790 /* 4791 * Best effort to get the name if the clock has been 4792 * registered with the framework. If the clock isn't 4793 * registered, we return the node name as the name of 4794 * the clock as long as #clock-cells = 0. 
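 * (With #clock-cells = 0 the provider node describes exactly one
 * clock, so the node name is an unambiguous fallback.)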
4795 */ 4796 clk = of_clk_get_from_provider(&clkspec); 4797 if (IS_ERR(clk)) { 4798 if (clkspec.args_count == 0) 4799 clk_name = clkspec.np->name; 4800 else 4801 clk_name = NULL; 4802 } else { 4803 clk_name = __clk_get_name(clk); 4804 clk_put(clk); 4805 } 4806 } 4807 4808 4809 of_node_put(clkspec.np); 4810 return clk_name; 4811 } 4812 EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 4813 4814 /** 4815 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return 4816 * number of parents 4817 * @np: Device node pointer associated with clock provider 4818 * @parents: pointer to char array that holds the parents' names 4819 * @size: size of the @parents array 4820 * 4821 * Return: number of parents for the clock node. 4822 */ 4823 int of_clk_parent_fill(struct device_node *np, const char **parents, 4824 unsigned int size) 4825 { 4826 unsigned int i = 0; 4827 4828 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) 4829 i++; 4830 4831 return i; 4832 } 4833 EXPORT_SYMBOL_GPL(of_clk_parent_fill); 4834 4835 struct clock_provider { 4836 void (*clk_init_cb)(struct device_node *); 4837 struct device_node *np; 4838 struct list_head node; 4839 }; 4840 4841 /* 4842 * This function looks for a parent clock. If there is one, it checks 4843 * whether the provider for this parent clock has been initialized; in 4844 * that case the parent clock is ready. 4845 */ 4846 static int parent_ready(struct device_node *np) 4847 { 4848 int i = 0; 4849 4850 while (true) { 4851 struct clk *clk = of_clk_get(np, i); 4852 4853 /* this parent is ready, we can check the next one */ 4854 if (!IS_ERR(clk)) { 4855 clk_put(clk); 4856 i++; 4857 continue; 4858 } 4859 4860 /* at least one parent is not ready, we exit now */ 4861 if (PTR_ERR(clk) == -EPROBE_DEFER) 4862 return 0; 4863 4864 /* 4865 * Here we assume that the device tree is written 4866 * correctly, so an error means that there are no 4867 * more parents. Since we did not exit earlier, the 4868 * previous parents are all ready. If there are no 4869 * clock parents at all, there is nothing to wait 4870 * for, and their absence also counts as ready. 4871 */ 4872 return 1; 4873 } 4874 } 4875 4876 /** 4877 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree 4878 * @np: Device node pointer associated with clock provider 4879 * @index: clock index 4880 * @flags: pointer to top-level framework flags 4881 * 4882 * Detects if the clock-critical property exists and, if so, sets the 4883 * corresponding CLK_IS_CRITICAL flag. 4884 * 4885 * Do not use this function. It exists only for legacy Device Tree 4886 * bindings, such as the outdated one-clock-per-node style. 4887 * Those bindings typically put all clock data into .dts and the Linux 4888 * driver has no clock data, thus making it impossible to set this flag 4889 * correctly from the driver. Only those drivers may call 4890 * of_clk_detect_critical from their setup functions. 4891 * 4892 * Return: error code or zero on success 4893 */ 4894 int of_clk_detect_critical(struct device_node *np, int index, 4895 unsigned long *flags) 4896 { 4897 struct property *prop; 4898 const __be32 *cur; 4899 uint32_t idx; 4900 4901 if (!np || !flags) 4902 return -EINVAL; 4903 4904 of_property_for_each_u32(np, "clock-critical", prop, cur, idx) 4905 if (index == idx) 4906 *flags |= CLK_IS_CRITICAL; 4907 4908 return 0; 4909 } 4910 4911 /** 4912 * of_clk_init() - Scan and init clock providers from the DT 4913 * @matches: array of compatible values and init functions for providers.
4914 * 4915 * This function scans the device tree for matching clock providers 4916 * and calls their initialization functions, following the dependencies 4917 * between providers where possible. 4918 */ 4919 void __init of_clk_init(const struct of_device_id *matches) 4920 { 4921 const struct of_device_id *match; 4922 struct device_node *np; 4923 struct clock_provider *clk_provider, *next; 4924 bool is_init_done; 4925 bool force = false; 4926 LIST_HEAD(clk_provider_list); 4927 4928 if (!matches) 4929 matches = &__clk_of_table; 4930 4931 /* First prepare the list of the clock providers */ 4932 for_each_matching_node_and_match(np, matches, &match) { 4933 struct clock_provider *parent; 4934 4935 if (!of_device_is_available(np)) 4936 continue; 4937 4938 parent = kzalloc(sizeof(*parent), GFP_KERNEL); 4939 if (!parent) { 4940 list_for_each_entry_safe(clk_provider, next, 4941 &clk_provider_list, node) { 4942 list_del(&clk_provider->node); 4943 of_node_put(clk_provider->np); 4944 kfree(clk_provider); 4945 } 4946 of_node_put(np); 4947 return; 4948 } 4949 4950 parent->clk_init_cb = match->data; 4951 parent->np = of_node_get(np); 4952 list_add_tail(&parent->node, &clk_provider_list); 4953 } 4954 4955 while (!list_empty(&clk_provider_list)) { 4956 is_init_done = false; 4957 list_for_each_entry_safe(clk_provider, next, 4958 &clk_provider_list, node) { 4959 if (force || parent_ready(clk_provider->np)) { 4960 4961 /* Don't populate platform devices */ 4962 of_node_set_flag(clk_provider->np, 4963 OF_POPULATED); 4964 4965 clk_provider->clk_init_cb(clk_provider->np); 4966 of_clk_set_defaults(clk_provider->np, true); 4967 4968 list_del(&clk_provider->node); 4969 of_node_put(clk_provider->np); 4970 kfree(clk_provider); 4971 is_init_done = true; 4972 } 4973 } 4974 4975 /* 4976 * We did not manage to initialize any of the 4977 * remaining providers during the last pass, so 4978 * initialize all the remaining ones unconditionally 4979 * in case their clock parents were not mandatory. 4980 */ 4981 if (!is_init_done) 4982 force = true; 4983 } 4984 } 4985 #endif 4986